From 6904a6520d3b5599404b339577c7c3311e635da9 Mon Sep 17 00:00:00 2001 From: Shantanu Alshi Date: Mon, 6 May 2024 17:14:15 +0530 Subject: [PATCH 01/47] fix: Fix log level detection (#12651) --- docs/sources/shared/configuration.md | 6 +- go.mod | 1 + go.sum | 2 + pkg/distributor/distributor.go | 161 ++- pkg/distributor/distributor_test.go | 155 +- pkg/validation/limits.go | 2 +- vendor/github.com/buger/jsonparser/.gitignore | 12 + .../github.com/buger/jsonparser/.travis.yml | 11 + vendor/github.com/buger/jsonparser/Dockerfile | 12 + vendor/github.com/buger/jsonparser/LICENSE | 21 + vendor/github.com/buger/jsonparser/Makefile | 36 + vendor/github.com/buger/jsonparser/README.md | 365 +++++ vendor/github.com/buger/jsonparser/bytes.go | 47 + .../github.com/buger/jsonparser/bytes_safe.go | 25 + .../buger/jsonparser/bytes_unsafe.go | 44 + vendor/github.com/buger/jsonparser/escape.go | 173 +++ vendor/github.com/buger/jsonparser/fuzz.go | 117 ++ .../buger/jsonparser/oss-fuzz-build.sh | 47 + vendor/github.com/buger/jsonparser/parser.go | 1283 +++++++++++++++++ vendor/modules.txt | 3 + 20 files changed, 2453 insertions(+), 70 deletions(-) create mode 100644 vendor/github.com/buger/jsonparser/.gitignore create mode 100644 vendor/github.com/buger/jsonparser/.travis.yml create mode 100644 vendor/github.com/buger/jsonparser/Dockerfile create mode 100644 vendor/github.com/buger/jsonparser/LICENSE create mode 100644 vendor/github.com/buger/jsonparser/Makefile create mode 100644 vendor/github.com/buger/jsonparser/README.md create mode 100644 vendor/github.com/buger/jsonparser/bytes.go create mode 100644 vendor/github.com/buger/jsonparser/bytes_safe.go create mode 100644 vendor/github.com/buger/jsonparser/bytes_unsafe.go create mode 100644 vendor/github.com/buger/jsonparser/escape.go create mode 100644 vendor/github.com/buger/jsonparser/fuzz.go create mode 100644 vendor/github.com/buger/jsonparser/oss-fuzz-build.sh create mode 100644 vendor/github.com/buger/jsonparser/parser.go diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index 208def0cdd521..674bb09ff0b9d 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -2925,8 +2925,10 @@ The `limits_config` block configures global and per-tenant limits in Loki. The v [discover_service_name: | default = [service app application name app_kubernetes_io_name container container_name component workload job]] # Discover and add log levels during ingestion, if not present already. Levels -# would be added to Structured Metadata with name 'level' and one of the values -# from 'debug', 'info', 'warn', 'error', 'critical', 'fatal'. +# would be added to Structured Metadata with name +# level/LEVEL/Level/Severity/severity/SEVERITY/lvl/LVL/Lvl (case-sensitive) and +# one of the values from 'trace', 'debug', 'info', 'warn', 'error', 'critical', +# 'fatal' (case insensitive). 
# CLI flag: -validation.discover-log-levels [discover_log_levels: | default = true] diff --git a/go.mod b/go.mod index 06969fdeed2e1..f1779c1be01bc 100644 --- a/go.mod +++ b/go.mod @@ -119,6 +119,7 @@ require ( github.com/IBM/go-sdk-core/v5 v5.13.1 github.com/IBM/ibm-cos-sdk-go v1.10.0 github.com/axiomhq/hyperloglog v0.0.0-20240124082744-24bca3a5b39b + github.com/buger/jsonparser v1.1.1 github.com/d4l3k/messagediff v1.2.1 github.com/dolthub/swiss v0.2.1 github.com/efficientgo/core v1.0.0-rc.2 diff --git a/go.sum b/go.sum index a873e59ac8dd2..df44df356fc8f 100644 --- a/go.sum +++ b/go.sum @@ -401,6 +401,8 @@ github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY= github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/caddyserver/caddy v1.0.4/go.mod h1:uruyfVsyMcDb3IOzSKsi1x0wOjy1my/PxOSTcD+24jM= diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index eae29a57c9055..268db96e897ac 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -12,8 +12,10 @@ import ( "time" "unicode" + "github.com/buger/jsonparser" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/go-logfmt/logfmt" "github.com/gogo/status" "github.com/prometheus/prometheus/model/labels" "go.opentelemetry.io/collector/pdata/plog" @@ -59,13 +61,15 @@ const ( labelServiceName = "service_name" serviceUnknown = "unknown_service" - labelLevel = "level" + levelLabel = "detected_level" logLevelDebug = "debug" logLevelInfo = "info" logLevelWarn = "warn" logLevelError = "error" logLevelFatal = "fatal" logLevelCritical = "critical" + logLevelTrace = "trace" + logLevelUnknown = "unknown" ) var ( @@ -73,6 +77,12 @@ var ( rfStats = analytics.NewInt("distributor_replication_factor") ) +var allowedLabelsForLevel = map[string]struct{}{ + "level": {}, "LEVEL": {}, "Level": {}, + "severity": {}, "SEVERITY": {}, "Severity": {}, + "lvl": {}, "LVL": {}, "Lvl": {}, +} + // Config for a Distributor. 
type Config struct { // Distributors ring @@ -376,7 +386,9 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log n := 0 pushSize := 0 prevTs := stream.Entries[0].Timestamp - addLogLevel := validationContext.allowStructuredMetadata && validationContext.discoverLogLevels && !lbs.Has(labelLevel) + + shouldDiscoverLevels := validationContext.allowStructuredMetadata && validationContext.discoverLogLevels + levelFromLabel, hasLevelLabel := hasAnyLevelLabels(lbs) for _, entry := range stream.Entries { if err := d.validator.ValidateEntry(ctx, validationContext, lbs, entry); err != nil { d.writeFailuresManager.Log(tenantID, err) @@ -385,12 +397,21 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log } structuredMetadata := logproto.FromLabelAdaptersToLabels(entry.StructuredMetadata) - if addLogLevel && !structuredMetadata.Has(labelLevel) { - logLevel := detectLogLevelFromLogEntry(entry, structuredMetadata) - entry.StructuredMetadata = append(entry.StructuredMetadata, logproto.LabelAdapter{ - Name: labelLevel, - Value: logLevel, - }) + if shouldDiscoverLevels { + var logLevel string + if hasLevelLabel { + logLevel = levelFromLabel + } else if levelFromMetadata, ok := hasAnyLevelLabels(structuredMetadata); ok { + logLevel = levelFromMetadata + } else { + logLevel = detectLogLevelFromLogEntry(entry, structuredMetadata) + } + if logLevel != logLevelUnknown && logLevel != "" { + entry.StructuredMetadata = append(entry.StructuredMetadata, logproto.LabelAdapter{ + Name: levelLabel, + Value: logLevel, + }) + } } stream.Entries[n] = entry @@ -537,6 +558,15 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log } } +func hasAnyLevelLabels(l labels.Labels) (string, bool) { + for lbl := range allowedLabelsForLevel { + if l.Has(lbl) { + return l.Get(lbl), true + } + } + return "", false +} + // shardStream shards (divides) the given stream into N smaller streams, where // N is the sharding size for the given stream. shardSteam returns the smaller // streams and their associated keys for hashing to ingesters. @@ -865,7 +895,11 @@ func detectLogLevelFromLogEntry(entry logproto.Entry, structuredMetadata labels. if err != nil { return logLevelInfo } - if otlpSeverityNumber <= int(plog.SeverityNumberDebug4) { + if otlpSeverityNumber == int(plog.SeverityNumberUnspecified) { + return logLevelUnknown + } else if otlpSeverityNumber <= int(plog.SeverityNumberTrace4) { + return logLevelTrace + } else if otlpSeverityNumber <= int(plog.SeverityNumberDebug4) { return logLevelDebug } else if otlpSeverityNumber <= int(plog.SeverityNumberInfo4) { return logLevelInfo @@ -876,18 +910,68 @@ func detectLogLevelFromLogEntry(entry logproto.Entry, structuredMetadata labels. 
} else if otlpSeverityNumber <= int(plog.SeverityNumberFatal4) { return logLevelFatal } - return logLevelInfo + return logLevelUnknown } return extractLogLevelFromLogLine(entry.Line) } func extractLogLevelFromLogLine(log string) string { - // check for log levels in known log formats to avoid any false detection + var v string + if isJSON(log) { + v = getValueUsingJSONParser(log) + } else { + v = getValueUsingLogfmtParser(log) + } - // json logs: + switch strings.ToLower(v) { + case "trace", "trc": + return logLevelTrace + case "debug", "dbg": + return logLevelDebug + case "info", "inf": + return logLevelInfo + case "warn", "wrn": + return logLevelWarn + case "error", "err": + return logLevelError + case "critical": + return logLevelCritical + case "fatal": + return logLevelFatal + default: + return detectLevelFromLogLine(log) + } +} + +func getValueUsingLogfmtParser(line string) string { + equalIndex := strings.Index(line, "=") + if len(line) == 0 || equalIndex == -1 { + return logLevelUnknown + } + d := logfmt.NewDecoder(strings.NewReader(line)) + d.ScanRecord() + for d.ScanKeyval() { + if _, ok := allowedLabelsForLevel[string(d.Key())]; ok { + return string(d.Value()) + } + } + return logLevelUnknown +} + +func getValueUsingJSONParser(log string) string { + for allowedLabel := range allowedLabelsForLevel { + l, err := jsonparser.GetString([]byte(log), allowedLabel) + if err == nil { + return l + } + } + return logLevelUnknown +} + +func isJSON(line string) bool { var firstNonSpaceChar rune - for _, char := range log { + for _, char := range line { if !unicode.IsSpace(char) { firstNonSpaceChar = char break @@ -895,55 +979,18 @@ func extractLogLevelFromLogLine(log string) string { } var lastNonSpaceChar rune - for i := len(log) - 1; i >= 0; i-- { - char := rune(log[i]) + for i := len(line) - 1; i >= 0; i-- { + char := rune(line[i]) if !unicode.IsSpace(char) { lastNonSpaceChar = char break } } - if firstNonSpaceChar == '{' && lastNonSpaceChar == '}' { - if strings.Contains(log, `:"err"`) || strings.Contains(log, `:"ERR"`) || - strings.Contains(log, `:"error"`) || strings.Contains(log, `:"ERROR"`) { - return logLevelError - } - if strings.Contains(log, `:"warn"`) || strings.Contains(log, `:"WARN"`) || - strings.Contains(log, `:"warning"`) || strings.Contains(log, `:"WARNING"`) { - return logLevelWarn - } - if strings.Contains(log, `:"critical"`) || strings.Contains(log, `:"CRITICAL"`) { - return logLevelCritical - } - if strings.Contains(log, `:"debug"`) || strings.Contains(log, `:"DEBUG"`) { - return logLevelDebug - } - if strings.Contains(log, `:"info"`) || strings.Contains(log, `:"INFO"`) { - return logLevelInfo - } - } - - // logfmt logs: - if strings.Contains(log, "=") { - if strings.Contains(log, "=err") || strings.Contains(log, "=ERR") || - strings.Contains(log, "=error") || strings.Contains(log, "=ERROR") { - return logLevelError - } - if strings.Contains(log, "=warn") || strings.Contains(log, "=WARN") || - strings.Contains(log, "=warning") || strings.Contains(log, "=WARNING") { - return logLevelWarn - } - if strings.Contains(log, "=critical") || strings.Contains(log, "=CRITICAL") { - return logLevelCritical - } - if strings.Contains(log, "=debug") || strings.Contains(log, "=DEBUG") { - return logLevelDebug - } - if strings.Contains(log, "=info") || strings.Contains(log, "=INFO") { - return logLevelInfo - } - } + return firstNonSpaceChar == '{' && lastNonSpaceChar == '}' +} +func detectLevelFromLogLine(log string) string { if strings.Contains(log, "err:") || strings.Contains(log, 
"ERR:") || strings.Contains(log, "error") || strings.Contains(log, "ERROR") { return logLevelError @@ -958,7 +1005,5 @@ func extractLogLevelFromLogLine(log string) string { if strings.Contains(log, "debug:") || strings.Contains(log, "DEBUG:") { return logLevelDebug } - - // Default to info if no specific level is found - return logLevelInfo + return logLevelUnknown } diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index f68e5f3a701ab..dc58f758835c0 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -1364,6 +1364,29 @@ func prepare(t *testing.T, numDistributors, numIngesters int, limits *validation return distributors, ingesters } +func makeWriteRequestWithLabelsWithLevel(lines, size int, labels []string, level string) *logproto.PushRequest { + streams := make([]logproto.Stream, len(labels)) + for i := 0; i < len(labels); i++ { + stream := logproto.Stream{Labels: labels[i]} + + for j := 0; j < lines; j++ { + // Construct the log line, honoring the input size + line := "msg=" + strconv.Itoa(j) + strings.Repeat("0", size) + " severity=" + level + + stream.Entries = append(stream.Entries, logproto.Entry{ + Timestamp: time.Now().Add(time.Duration(j) * time.Millisecond), + Line: line, + }) + } + + streams[i] = stream + } + + return &logproto.PushRequest{ + Streams: streams, + } +} + func makeWriteRequestWithLabels(lines, size int, labels []string) *logproto.PushRequest { streams := make([]logproto.Stream, len(labels)) for i := 0; i < len(labels); i++ { @@ -1536,7 +1559,7 @@ func Test_DetectLogLevels(t *testing.T) { require.Len(t, topVal.Streams[0].Entries[0].StructuredMetadata, 0) }) - t.Run("log level detection enabled", func(t *testing.T) { + t.Run("log level detection enabled but level cannot be detected", func(t *testing.T) { limits, ingester := setup(true) distributors, _ := prepare(t, 1, 5, limits, func(addr string) (ring_client.PoolClient, error) { return ingester, nil }) @@ -1545,10 +1568,22 @@ func Test_DetectLogLevels(t *testing.T) { require.NoError(t, err) topVal := ingester.Peek() require.Equal(t, `{foo="bar"}`, topVal.Streams[0].Labels) + require.Len(t, topVal.Streams[0].Entries[0].StructuredMetadata, 0) + }) + + t.Run("log level detection enabled and warn logs", func(t *testing.T) { + limits, ingester := setup(true) + distributors, _ := prepare(t, 1, 5, limits, func(addr string) (ring_client.PoolClient, error) { return ingester, nil }) + + writeReq := makeWriteRequestWithLabelsWithLevel(1, 10, []string{`{foo="bar"}`}, "warn") + _, err := distributors[0].Push(ctx, writeReq) + require.NoError(t, err) + topVal := ingester.Peek() + require.Equal(t, `{foo="bar"}`, topVal.Streams[0].Labels) require.Equal(t, push.LabelsAdapter{ { - Name: labelLevel, - Value: logLevelInfo, + Name: levelLabel, + Value: logLevelWarn, }, }, topVal.Streams[0].Entries[0].StructuredMetadata) }) @@ -1562,7 +1597,10 @@ func Test_DetectLogLevels(t *testing.T) { require.NoError(t, err) topVal := ingester.Peek() require.Equal(t, `{foo="bar", level="debug"}`, topVal.Streams[0].Labels) - require.Len(t, topVal.Streams[0].Entries[0].StructuredMetadata, 0) + sm := topVal.Streams[0].Entries[0].StructuredMetadata + require.Len(t, sm, 1) + require.Equal(t, sm[0].Name, levelLabel) + require.Equal(t, sm[0].Value, logLevelDebug) }) t.Run("log level detection enabled but log level already present as structured metadata", func(t *testing.T) { @@ -1572,7 +1610,7 @@ func Test_DetectLogLevels(t *testing.T) { writeReq := makeWriteRequestWithLabels(1, 
10, []string{`{foo="bar"}`}) writeReq.Streams[0].Entries[0].StructuredMetadata = push.LabelsAdapter{ { - Name: labelLevel, + Name: "severity", Value: logLevelWarn, }, } @@ -1580,12 +1618,16 @@ func Test_DetectLogLevels(t *testing.T) { require.NoError(t, err) topVal := ingester.Peek() require.Equal(t, `{foo="bar"}`, topVal.Streams[0].Labels) + sm := topVal.Streams[0].Entries[0].StructuredMetadata require.Equal(t, push.LabelsAdapter{ { - Name: labelLevel, + Name: "severity", + Value: logLevelWarn, + }, { + Name: levelLabel, Value: logLevelWarn, }, - }, topVal.Streams[0].Entries[0].StructuredMetadata) + }, sm) }) } @@ -1625,7 +1667,7 @@ func Test_detectLogLevelFromLogEntry(t *testing.T) { entry: logproto.Entry{ Line: "foo", }, - expectedLogLevel: logLevelInfo, + expectedLogLevel: logLevelUnknown, }, { name: "non otlp with log level keywords in log line", @@ -1637,10 +1679,38 @@ func Test_detectLogLevelFromLogEntry(t *testing.T) { { name: "json log line with an error", entry: logproto.Entry{ - Line: `{"foo":"bar",msg:"message with keyword error but it should not get picked up",level":"critical"}`, + Line: `{"foo":"bar","msg":"message with keyword error but it should not get picked up","level":"critical"}`, }, expectedLogLevel: logLevelCritical, }, + { + name: "json log line with an error", + entry: logproto.Entry{ + Line: `{"FOO":"bar","MSG":"message with keyword error but it should not get picked up","LEVEL":"Critical"}`, + }, + expectedLogLevel: logLevelCritical, + }, + { + name: "json log line with an warning", + entry: logproto.Entry{ + Line: `{"foo":"bar","msg":"message with keyword warn but it should not get picked up","level":"warn"}`, + }, + expectedLogLevel: logLevelWarn, + }, + { + name: "json log line with an warning", + entry: logproto.Entry{ + Line: `{"foo":"bar","msg":"message with keyword warn but it should not get picked up","SEVERITY":"FATAL"}`, + }, + expectedLogLevel: logLevelFatal, + }, + { + name: "json log line with an error in block case", + entry: logproto.Entry{ + Line: `{"foo":"bar","msg":"message with keyword warn but it should not get picked up","level":"ERR"}`, + }, + expectedLogLevel: logLevelError, + }, { name: "logfmt log line with a warn", entry: logproto.Entry{ @@ -1648,6 +1718,55 @@ func Test_detectLogLevelFromLogEntry(t *testing.T) { }, expectedLogLevel: logLevelWarn, }, + { + name: "logfmt log line with a warn with camel case", + entry: logproto.Entry{ + Line: `foo=bar msg="message with keyword error but it should not get picked up" level=Warn`, + }, + expectedLogLevel: logLevelWarn, + }, + { + name: "logfmt log line with a trace", + entry: logproto.Entry{ + Line: `foo=bar msg="message with keyword error but it should not get picked up" level=Trace`, + }, + expectedLogLevel: logLevelTrace, + }, + { + name: "logfmt log line with some other level returns unknown log level", + entry: logproto.Entry{ + Line: `foo=bar msg="message with keyword but it should not get picked up" level=NA`, + }, + expectedLogLevel: logLevelUnknown, + }, + { + name: "logfmt log line with label Severity is allowed for level detection", + entry: logproto.Entry{ + Line: `foo=bar msg="message with keyword but it should not get picked up" severity=critical`, + }, + expectedLogLevel: logLevelCritical, + }, + { + name: "logfmt log line with label Severity with camelcase is allowed for level detection", + entry: logproto.Entry{ + Line: `Foo=bar MSG="Message with keyword but it should not get picked up" Severity=critical`, + }, + expectedLogLevel: logLevelCritical, + }, + { + name: "logfmt 
log line with a info with non standard case", + entry: logproto.Entry{ + Line: `foo=bar msg="message with keyword error but it should not get picked up" level=inFO`, + }, + expectedLogLevel: logLevelInfo, + }, + { + name: "logfmt log line with a info with non block case for level", + entry: logproto.Entry{ + Line: `FOO=bar MSG="message with keyword error but it should not get picked up" LEVEL=inFO`, + }, + expectedLogLevel: logLevelInfo, + }, } { t.Run(tc.name, func(t *testing.T) { detectedLogLevel := detectLogLevelFromLogEntry(tc.entry, logproto.FromLabelAdaptersToLabels(tc.entry.StructuredMetadata)) @@ -1669,6 +1788,24 @@ func Benchmark_extractLogLevelFromLogLine(b *testing.B) { "RJtBuW ABOqQHLSlNuUw ZlM2nGS2 jwA7cXEOJhY 3oPv4gGAz Uqdre16MF92C06jOH dayqTCK8XmIilT uvgywFSfNadYvRDQa " + "iUbswJNcwqcr6huw LAGrZS8NGlqqzcD2wFU rm Uqcrh3TKLUCkfkwLm 5CIQbxMCUz boBrEHxvCBrUo YJoF2iyif4xq3q yk " + for i := 0; i < b.N; i++ { + level := extractLogLevelFromLogLine(logLine) + require.Equal(b, logLevelUnknown, level) + } +} + +func Benchmark_optParseExtractLogLevelFromLogLineJson(b *testing.B) { + logLine := `{"msg": "something" , "level": "error", "id": "1"}` + + for i := 0; i < b.N; i++ { + level := extractLogLevelFromLogLine(logLine) + require.Equal(b, logLevelError, level) + } +} + +func Benchmark_optParseExtractLogLevelFromLogLineLogfmt(b *testing.B) { + logLine := `FOO=bar MSG="message with keyword error but it should not get picked up" LEVEL=inFO` + for i := 0; i < b.N; i++ { level := extractLogLevelFromLogLine(logLine) require.Equal(b, logLevelInfo, level) diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index 036f5660c0929..27e6702dc9889 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -258,7 +258,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { "job", } f.Var((*dskit_flagext.StringSlice)(&l.DiscoverServiceName), "validation.discover-service-name", "If no service_name label exists, Loki maps a single label from the configured list to service_name. If none of the configured labels exist in the stream, label is set to unknown_service. Empty list disables setting the label.") - f.BoolVar(&l.DiscoverLogLevels, "validation.discover-log-levels", true, "Discover and add log levels during ingestion, if not present already. Levels would be added to Structured Metadata with name 'level' and one of the values from 'debug', 'info', 'warn', 'error', 'critical', 'fatal'.") + f.BoolVar(&l.DiscoverLogLevels, "validation.discover-log-levels", true, "Discover and add log levels during ingestion, if not present already. 
Levels would be added to Structured Metadata with name level/LEVEL/Level/Severity/severity/SEVERITY/lvl/LVL/Lvl (case-sensitive) and one of the values from 'trace', 'debug', 'info', 'warn', 'error', 'critical', 'fatal' (case insensitive).") _ = l.RejectOldSamplesMaxAge.Set("7d") f.Var(&l.RejectOldSamplesMaxAge, "validation.reject-old-samples.max-age", "Maximum accepted sample age before rejecting.") diff --git a/vendor/github.com/buger/jsonparser/.gitignore b/vendor/github.com/buger/jsonparser/.gitignore new file mode 100644 index 0000000000000..5598d8a5691a5 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/.gitignore @@ -0,0 +1,12 @@ + +*.test + +*.out + +*.mprof + +.idea + +vendor/github.com/buger/goterm/ +prof.cpu +prof.mem diff --git a/vendor/github.com/buger/jsonparser/.travis.yml b/vendor/github.com/buger/jsonparser/.travis.yml new file mode 100644 index 0000000000000..dbfb7cf988305 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/.travis.yml @@ -0,0 +1,11 @@ +language: go +arch: + - amd64 + - ppc64le +go: + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x +script: go test -v ./. diff --git a/vendor/github.com/buger/jsonparser/Dockerfile b/vendor/github.com/buger/jsonparser/Dockerfile new file mode 100644 index 0000000000000..37fc9fd0b4d63 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/Dockerfile @@ -0,0 +1,12 @@ +FROM golang:1.6 + +RUN go get github.com/Jeffail/gabs +RUN go get github.com/bitly/go-simplejson +RUN go get github.com/pquerna/ffjson +RUN go get github.com/antonholmquist/jason +RUN go get github.com/mreiferson/go-ujson +RUN go get -tags=unsafe -u github.com/ugorji/go/codec +RUN go get github.com/mailru/easyjson + +WORKDIR /go/src/github.com/buger/jsonparser +ADD . /go/src/github.com/buger/jsonparser \ No newline at end of file diff --git a/vendor/github.com/buger/jsonparser/LICENSE b/vendor/github.com/buger/jsonparser/LICENSE new file mode 100644 index 0000000000000..ac25aeb7da280 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 Leonid Bugaev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/buger/jsonparser/Makefile b/vendor/github.com/buger/jsonparser/Makefile new file mode 100644 index 0000000000000..e843368cf103d --- /dev/null +++ b/vendor/github.com/buger/jsonparser/Makefile @@ -0,0 +1,36 @@ +SOURCE = parser.go +CONTAINER = jsonparser +SOURCE_PATH = /go/src/github.com/buger/jsonparser +BENCHMARK = JsonParser +BENCHTIME = 5s +TEST = . 
+DRUN = docker run -v `pwd`:$(SOURCE_PATH) -i -t $(CONTAINER) + +build: + docker build -t $(CONTAINER) . + +race: + $(DRUN) --env GORACE="halt_on_error=1" go test ./. $(ARGS) -v -race -timeout 15s + +bench: + $(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -benchtime $(BENCHTIME) -v + +bench_local: + $(DRUN) go test $(LDFLAGS) -test.benchmem -bench . $(ARGS) -benchtime $(BENCHTIME) -v + +profile: + $(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -memprofile mem.mprof -v + $(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -cpuprofile cpu.out -v + $(DRUN) go test $(LDFLAGS) -test.benchmem -bench $(BENCHMARK) ./benchmark/ $(ARGS) -c + +test: + $(DRUN) go test $(LDFLAGS) ./ -run $(TEST) -timeout 10s $(ARGS) -v + +fmt: + $(DRUN) go fmt ./... + +vet: + $(DRUN) go vet ./. + +bash: + $(DRUN) /bin/bash \ No newline at end of file diff --git a/vendor/github.com/buger/jsonparser/README.md b/vendor/github.com/buger/jsonparser/README.md new file mode 100644 index 0000000000000..d7e0ec397affe --- /dev/null +++ b/vendor/github.com/buger/jsonparser/README.md @@ -0,0 +1,365 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/buger/jsonparser)](https://goreportcard.com/report/github.com/buger/jsonparser) ![License](https://img.shields.io/dub/l/vibe-d.svg) +# Alternative JSON parser for Go (10x times faster standard library) + +It does not require you to know the structure of the payload (eg. create structs), and allows accessing fields by providing the path to them. It is up to **10 times faster** than standard `encoding/json` package (depending on payload size and usage), **allocates no memory**. See benchmarks below. + +## Rationale +Originally I made this for a project that relies on a lot of 3rd party APIs that can be unpredictable and complex. +I love simplicity and prefer to avoid external dependecies. `encoding/json` requires you to know exactly your data structures, or if you prefer to use `map[string]interface{}` instead, it will be very slow and hard to manage. +I investigated what's on the market and found that most libraries are just wrappers around `encoding/json`, there is few options with own parsers (`ffjson`, `easyjson`), but they still requires you to create data structures. + + +Goal of this project is to push JSON parser to the performance limits and not sacrifice with compliance and developer user experience. + +## Example +For the given JSON our goal is to extract the user's full name, number of github followers and avatar. + +```go +import "github.com/buger/jsonparser" + +... 
+ +data := []byte(`{ + "person": { + "name": { + "first": "Leonid", + "last": "Bugaev", + "fullName": "Leonid Bugaev" + }, + "github": { + "handle": "buger", + "followers": 109 + }, + "avatars": [ + { "url": "https://avatars1.githubusercontent.com/u/14009?v=3&s=460", "type": "thumbnail" } + ] + }, + "company": { + "name": "Acme" + } +}`) + +// You can specify key path by providing arguments to Get function +jsonparser.Get(data, "person", "name", "fullName") + +// There is `GetInt` and `GetBoolean` helpers if you exactly know key data type +jsonparser.GetInt(data, "person", "github", "followers") + +// When you try to get object, it will return you []byte slice pointer to data containing it +// In `company` it will be `{"name": "Acme"}` +jsonparser.Get(data, "company") + +// If the key doesn't exist it will throw an error +var size int64 +if value, err := jsonparser.GetInt(data, "company", "size"); err == nil { + size = value +} + +// You can use `ArrayEach` helper to iterate items [item1, item2 .... itemN] +jsonparser.ArrayEach(data, func(value []byte, dataType jsonparser.ValueType, offset int, err error) { + fmt.Println(jsonparser.Get(value, "url")) +}, "person", "avatars") + +// Or use can access fields by index! +jsonparser.GetString(data, "person", "avatars", "[0]", "url") + +// You can use `ObjectEach` helper to iterate objects { "key1":object1, "key2":object2, .... "keyN":objectN } +jsonparser.ObjectEach(data, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { + fmt.Printf("Key: '%s'\n Value: '%s'\n Type: %s\n", string(key), string(value), dataType) + return nil +}, "person", "name") + +// The most efficient way to extract multiple keys is `EachKey` + +paths := [][]string{ + []string{"person", "name", "fullName"}, + []string{"person", "avatars", "[0]", "url"}, + []string{"company", "url"}, +} +jsonparser.EachKey(data, func(idx int, value []byte, vt jsonparser.ValueType, err error){ + switch idx { + case 0: // []string{"person", "name", "fullName"} + ... + case 1: // []string{"person", "avatars", "[0]", "url"} + ... + case 2: // []string{"company", "url"}, + ... + } +}, paths...) + +// For more information see docs below +``` + +## Need to speedup your app? + +I'm available for consulting and can help you push your app performance to the limits. Ping me at: leonsbox@gmail.com. + +## Reference + +Library API is really simple. You just need the `Get` method to perform any operation. The rest is just helpers around it. + +You also can view API at [godoc.org](https://godoc.org/github.com/buger/jsonparser) + + +### **`Get`** +```go +func Get(data []byte, keys ...string) (value []byte, dataType jsonparser.ValueType, offset int, err error) +``` +Receives data structure, and key path to extract value from. + +Returns: +* `value` - Pointer to original data structure containing key value, or just empty slice if nothing found or error +* `dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null` +* `offset` - Offset from provided data structure where key value ends. Used mostly internally, for example for `ArrayEach` helper. +* `err` - If the key is not found or any other parsing issue, it should return error. If key not found it also sets `dataType` to `NotExist` + +Accepts multiple keys to specify path to JSON value (in case of quering nested structures). +If no keys are provided it will try to extract the closest JSON value (simple ones or object/array), useful for reading streams or arrays, see `ArrayEach` implementation. 
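+For illustration, here is a minimal sketch (continuing the `person` example payload above, so `data` is assumed to be the same byte slice, with `fmt` imported alongside `jsonparser`) of checking the returned value type and error:
+
+```go
+// Extract person.github.handle and confirm it really is a JSON string.
+value, dataType, _, err := jsonparser.Get(data, "person", "github", "handle")
+if err == nil && dataType == jsonparser.String {
+	fmt.Println(string(value)) // prints: buger
+}
+```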
+ +Note that keys can be an array indexes: `jsonparser.GetInt("person", "avatars", "[0]", "url")`, pretty cool, yeah? + +### **`GetString`** +```go +func GetString(data []byte, keys ...string) (val string, err error) +``` +Returns strings properly handing escaped and unicode characters. Note that this will cause additional memory allocations. + +### **`GetUnsafeString`** +If you need string in your app, and ready to sacrifice with support of escaped symbols in favor of speed. It returns string mapped to existing byte slice memory, without any allocations: +```go +s, _, := jsonparser.GetUnsafeString(data, "person", "name", "title") +switch s { + case 'CEO': + ... + case 'Engineer' + ... + ... +} +``` +Note that `unsafe` here means that your string will exist until GC will free underlying byte slice, for most of cases it means that you can use this string only in current context, and should not pass it anywhere externally: through channels or any other way. + + +### **`GetBoolean`**, **`GetInt`** and **`GetFloat`** +```go +func GetBoolean(data []byte, keys ...string) (val bool, err error) + +func GetFloat(data []byte, keys ...string) (val float64, err error) + +func GetInt(data []byte, keys ...string) (val int64, err error) +``` +If you know the key type, you can use the helpers above. +If key data type do not match, it will return error. + +### **`ArrayEach`** +```go +func ArrayEach(data []byte, cb func(value []byte, dataType jsonparser.ValueType, offset int, err error), keys ...string) +``` +Needed for iterating arrays, accepts a callback function with the same return arguments as `Get`. + +### **`ObjectEach`** +```go +func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error) +``` +Needed for iterating object, accepts a callback function. Example: +```go +var handler func([]byte, []byte, jsonparser.ValueType, int) error +handler = func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error { + //do stuff here +} +jsonparser.ObjectEach(myJson, handler) +``` + + +### **`EachKey`** +```go +func EachKey(data []byte, cb func(idx int, value []byte, dataType jsonparser.ValueType, err error), paths ...[]string) +``` +When you need to read multiple keys, and you do not afraid of low-level API `EachKey` is your friend. It read payload only single time, and calls callback function once path is found. For example when you call multiple times `Get`, it has to process payload multiple times, each time you call it. Depending on payload `EachKey` can be multiple times faster than `Get`. Path can use nested keys as well! + +```go +paths := [][]string{ + []string{"uuid"}, + []string{"tz"}, + []string{"ua"}, + []string{"st"}, +} +var data SmallPayload + +jsonparser.EachKey(smallFixture, func(idx int, value []byte, vt jsonparser.ValueType, err error){ + switch idx { + case 0: + data.Uuid, _ = value + case 1: + v, _ := jsonparser.ParseInt(value) + data.Tz = int(v) + case 2: + data.Ua, _ = value + case 3: + v, _ := jsonparser.ParseInt(value) + data.St = int(v) + } +}, paths...) +``` + +### **`Set`** +```go +func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error) +``` +Receives existing data structure, key path to set, and value to set at that key. *This functionality is experimental.* + +Returns: +* `value` - Pointer to original data structure with updated or added key value. +* `err` - If any parsing issue, it should return error. 
+
+Accepts multiple keys to specify the path to the JSON value (in case of updating or creating nested structures).
+
+Note that keys can be array indexes: `jsonparser.Set(data, []byte("http://github.com"), "person", "avatars", "[0]", "url")`
+
+### **`Delete`**
+```go
+func Delete(data []byte, keys ...string) value []byte
+```
+Receives an existing data structure and a key path to delete. *This functionality is experimental.*
+
+Returns:
+* `value` - Pointer to the original data structure with the key path deleted if it can be found. If there is no key path, then the whole data structure is deleted.
+
+Accepts multiple keys to specify the path to the JSON value (in case of updating or creating nested structures).
+
+Note that keys can be array indexes: `jsonparser.Delete(data, "person", "avatars", "[0]", "url")`
+
+
+## What makes it so fast?
+* It does not rely on `encoding/json`, `reflection` or `interface{}`; the only real package dependency is `bytes`.
+* Operates on the JSON payload at the byte level, providing you pointers to the original data structure: no memory allocation.
+* No automatic type conversions; by default everything is a []byte, but it provides you the value type, so you can convert it yourself (there are a few helpers included).
+* Does not parse the full record, only the keys you specified.
+
+
+## Benchmarks
+
+There are 3 benchmark types, trying to simulate real-life usage for small, medium and large JSON payloads.
+For each metric, the lower value is better. Time/op is in nanoseconds. Values better than standard encoding/json are marked as bold text.
+Benchmarks run on a standard Linode 1024 box.
+
+Compared libraries:
+* https://golang.org/pkg/encoding/json
+* https://github.com/Jeffail/gabs
+* https://github.com/a8m/djson
+* https://github.com/bitly/go-simplejson
+* https://github.com/antonholmquist/jason
+* https://github.com/mreiferson/go-ujson
+* https://github.com/ugorji/go/codec
+* https://github.com/pquerna/ffjson
+* https://github.com/mailru/easyjson
+* https://github.com/buger/jsonparser
+
+#### TLDR
+If you want to skip the next sections, we have 2 winners: `jsonparser` and `easyjson`.
+`jsonparser` is up to 10 times faster than the standard `encoding/json` package (depending on payload size and usage), and almost infinitely (literally) better in memory consumption because it operates with data on the byte level and provides direct slice pointers.
+`easyjson` wins in CPU in the medium tests, and frankly I'm impressed with this package: these are remarkable results considering that it is almost a drop-in replacement for `encoding/json` (it requires some code generation).
+
+It's hard to fully compare `jsonparser` and `easyjson` (or `ffjson`): they are true parsers and fully process the record, unlike `jsonparser`, which parses only the keys you specified.
+
+If you are searching for a replacement for `encoding/json` while keeping structs, `easyjson` is an amazing choice. If you want to process dynamic JSON, have memory constraints, or want more control over your data, you should try `jsonparser`.
+
+`jsonparser` performance heavily depends on usage, and it works best when you do not need to process the full record, only some keys. The more calls you need to make, the slower it will be; in contrast, `easyjson` (or `ffjson`, `encoding/json`) parses the record only once, and then you can make as many calls as you want.
+
+With great power comes great responsibility! :)
+
+
+#### Small payload
+
+Each test processes 190 bytes of HTTP log as a JSON record.
+It should read multiple fields.
+https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_small_payload_test.go + +Library | time/op | bytes/op | allocs/op + ------ | ------- | -------- | ------- +encoding/json struct | 7879 | 880 | 18 +encoding/json interface{} | 8946 | 1521 | 38 +Jeffail/gabs | 10053 | 1649 | 46 +bitly/go-simplejson | 10128 | 2241 | 36 +antonholmquist/jason | 27152 | 7237 | 101 +github.com/ugorji/go/codec | 8806 | 2176 | 31 +mreiferson/go-ujson | **7008** | **1409** | 37 +a8m/djson | 3862 | 1249 | 30 +pquerna/ffjson | **3769** | **624** | **15** +mailru/easyjson | **2002** | **192** | **9** +buger/jsonparser | **1367** | **0** | **0** +buger/jsonparser (EachKey API) | **809** | **0** | **0** + +Winners are ffjson, easyjson and jsonparser, where jsonparser is up to 9.8x faster than encoding/json and 4.6x faster than ffjson, and slightly faster than easyjson. +If you look at memory allocation, jsonparser has no rivals, as it makes no data copy and operates with raw []byte structures and pointers to it. + +#### Medium payload + +Each test processes a 2.4kb JSON record (based on Clearbit API). +It should read multiple nested fields and 1 array. + +https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_medium_payload_test.go + +| Library | time/op | bytes/op | allocs/op | +| ------- | ------- | -------- | --------- | +| encoding/json struct | 57749 | 1336 | 29 | +| encoding/json interface{} | 79297 | 10627 | 215 | +| Jeffail/gabs | 83807 | 11202 | 235 | +| bitly/go-simplejson | 88187 | 17187 | 220 | +| antonholmquist/jason | 94099 | 19013 | 247 | +| github.com/ugorji/go/codec | 114719 | 6712 | 152 | +| mreiferson/go-ujson | **56972** | 11547 | 270 | +| a8m/djson | 28525 | 10196 | 198 | +| pquerna/ffjson | **20298** | **856** | **20** | +| mailru/easyjson | **10512** | **336** | **12** | +| buger/jsonparser | **15955** | **0** | **0** | +| buger/jsonparser (EachKey API) | **8916** | **0** | **0** | + +The difference between ffjson and jsonparser in CPU usage is smaller, while the memory consumption difference is growing. On the other hand `easyjson` shows remarkable performance for medium payload. + +`gabs`, `go-simplejson` and `jason` are based on encoding/json and map[string]interface{} and actually only helpers for unstructured JSON, their performance correlate with `encoding/json interface{}`, and they will skip next round. +`go-ujson` while have its own parser, shows same performance as `encoding/json`, also skips next round. Same situation with `ugorji/go/codec`, but it showed unexpectedly bad performance for complex payloads. + + +#### Large payload + +Each test processes a 24kb JSON record (based on Discourse API) +It should read 2 arrays, and for each item in array get a few fields. +Basically it means processing a full JSON file. + +https://github.com/buger/jsonparser/blob/master/benchmark/benchmark_large_payload_test.go + +| Library | time/op | bytes/op | allocs/op | +| --- | --- | --- | --- | +| encoding/json struct | 748336 | 8272 | 307 | +| encoding/json interface{} | 1224271 | 215425 | 3395 | +| a8m/djson | 510082 | 213682 | 2845 | +| pquerna/ffjson | **312271** | **7792** | **298** | +| mailru/easyjson | **154186** | **6992** | **288** | +| buger/jsonparser | **85308** | **0** | **0** | + +`jsonparser` now is a winner, but do not forget that it is way more lightweight parser than `ffson` or `easyjson`, and they have to parser all the data, while `jsonparser` parse only what you need. 
All `ffjson`, `easysjon` and `jsonparser` have their own parsing code, and does not depend on `encoding/json` or `interface{}`, thats one of the reasons why they are so fast. `easyjson` also use a bit of `unsafe` package to reduce memory consuption (in theory it can lead to some unexpected GC issue, but i did not tested enough) + +Also last benchmark did not included `EachKey` test, because in this particular case we need to read lot of Array values, and using `ArrayEach` is more efficient. + +## Questions and support + +All bug-reports and suggestions should go though Github Issues. + +## Contributing + +1. Fork it +2. Create your feature branch (git checkout -b my-new-feature) +3. Commit your changes (git commit -am 'Added some feature') +4. Push to the branch (git push origin my-new-feature) +5. Create new Pull Request + +## Development + +All my development happens using Docker, and repo include some Make tasks to simplify development. + +* `make build` - builds docker image, usually can be called only once +* `make test` - run tests +* `make fmt` - run go fmt +* `make bench` - run benchmarks (if you need to run only single benchmark modify `BENCHMARK` variable in make file) +* `make profile` - runs benchmark and generate 3 files- `cpu.out`, `mem.mprof` and `benchmark.test` binary, which can be used for `go tool pprof` +* `make bash` - enter container (i use it for running `go tool pprof` above) diff --git a/vendor/github.com/buger/jsonparser/bytes.go b/vendor/github.com/buger/jsonparser/bytes.go new file mode 100644 index 0000000000000..0bb0ff39562cb --- /dev/null +++ b/vendor/github.com/buger/jsonparser/bytes.go @@ -0,0 +1,47 @@ +package jsonparser + +import ( + bio "bytes" +) + +// minInt64 '-9223372036854775808' is the smallest representable number in int64 +const minInt64 = `9223372036854775808` + +// About 2x faster then strconv.ParseInt because it only supports base 10, which is enough for JSON +func parseInt(bytes []byte) (v int64, ok bool, overflow bool) { + if len(bytes) == 0 { + return 0, false, false + } + + var neg bool = false + if bytes[0] == '-' { + neg = true + bytes = bytes[1:] + } + + var b int64 = 0 + for _, c := range bytes { + if c >= '0' && c <= '9' { + b = (10 * v) + int64(c-'0') + } else { + return 0, false, false + } + if overflow = (b < v); overflow { + break + } + v = b + } + + if overflow { + if neg && bio.Equal(bytes, []byte(minInt64)) { + return b, true, false + } + return 0, false, true + } + + if neg { + return -v, true, false + } else { + return v, true, false + } +} diff --git a/vendor/github.com/buger/jsonparser/bytes_safe.go b/vendor/github.com/buger/jsonparser/bytes_safe.go new file mode 100644 index 0000000000000..ff16a4a19552d --- /dev/null +++ b/vendor/github.com/buger/jsonparser/bytes_safe.go @@ -0,0 +1,25 @@ +// +build appengine appenginevm + +package jsonparser + +import ( + "strconv" +) + +// See fastbytes_unsafe.go for explanation on why *[]byte is used (signatures must be consistent with those in that file) + +func equalStr(b *[]byte, s string) bool { + return string(*b) == s +} + +func parseFloat(b *[]byte) (float64, error) { + return strconv.ParseFloat(string(*b), 64) +} + +func bytesToString(b *[]byte) string { + return string(*b) +} + +func StringToBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/buger/jsonparser/bytes_unsafe.go b/vendor/github.com/buger/jsonparser/bytes_unsafe.go new file mode 100644 index 0000000000000..589fea87eb33f --- /dev/null +++ b/vendor/github.com/buger/jsonparser/bytes_unsafe.go 
@@ -0,0 +1,44 @@ +// +build !appengine,!appenginevm + +package jsonparser + +import ( + "reflect" + "strconv" + "unsafe" + "runtime" +) + +// +// The reason for using *[]byte rather than []byte in parameters is an optimization. As of Go 1.6, +// the compiler cannot perfectly inline the function when using a non-pointer slice. That is, +// the non-pointer []byte parameter version is slower than if its function body is manually +// inlined, whereas the pointer []byte version is equally fast to the manually inlined +// version. Instruction count in assembly taken from "go tool compile" confirms this difference. +// +// TODO: Remove hack after Go 1.7 release +// +func equalStr(b *[]byte, s string) bool { + return *(*string)(unsafe.Pointer(b)) == s +} + +func parseFloat(b *[]byte) (float64, error) { + return strconv.ParseFloat(*(*string)(unsafe.Pointer(b)), 64) +} + +// A hack until issue golang/go#2632 is fixed. +// See: https://github.com/golang/go/issues/2632 +func bytesToString(b *[]byte) string { + return *(*string)(unsafe.Pointer(b)) +} + +func StringToBytes(s string) []byte { + b := make([]byte, 0, 0) + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + sh := (*reflect.StringHeader)(unsafe.Pointer(&s)) + bh.Data = sh.Data + bh.Cap = sh.Len + bh.Len = sh.Len + runtime.KeepAlive(s) + return b +} diff --git a/vendor/github.com/buger/jsonparser/escape.go b/vendor/github.com/buger/jsonparser/escape.go new file mode 100644 index 0000000000000..49669b94207c2 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/escape.go @@ -0,0 +1,173 @@ +package jsonparser + +import ( + "bytes" + "unicode/utf8" +) + +// JSON Unicode stuff: see https://tools.ietf.org/html/rfc7159#section-7 + +const supplementalPlanesOffset = 0x10000 +const highSurrogateOffset = 0xD800 +const lowSurrogateOffset = 0xDC00 + +const basicMultilingualPlaneReservedOffset = 0xDFFF +const basicMultilingualPlaneOffset = 0xFFFF + +func combineUTF16Surrogates(high, low rune) rune { + return supplementalPlanesOffset + (high-highSurrogateOffset)<<10 + (low - lowSurrogateOffset) +} + +const badHex = -1 + +func h2I(c byte) int { + switch { + case c >= '0' && c <= '9': + return int(c - '0') + case c >= 'A' && c <= 'F': + return int(c - 'A' + 10) + case c >= 'a' && c <= 'f': + return int(c - 'a' + 10) + } + return badHex +} + +// decodeSingleUnicodeEscape decodes a single \uXXXX escape sequence. The prefix \u is assumed to be present and +// is not checked. +// In JSON, these escapes can either come alone or as part of "UTF16 surrogate pairs" that must be handled together. +// This function only handles one; decodeUnicodeEscape handles this more complex case. +func decodeSingleUnicodeEscape(in []byte) (rune, bool) { + // We need at least 6 characters total + if len(in) < 6 { + return utf8.RuneError, false + } + + // Convert hex to decimal + h1, h2, h3, h4 := h2I(in[2]), h2I(in[3]), h2I(in[4]), h2I(in[5]) + if h1 == badHex || h2 == badHex || h3 == badHex || h4 == badHex { + return utf8.RuneError, false + } + + // Compose the hex digits + return rune(h1<<12 + h2<<8 + h3<<4 + h4), true +} + +// isUTF16EncodedRune checks if a rune is in the range for non-BMP characters, +// which is used to describe UTF16 chars. 
+// Source: https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane +func isUTF16EncodedRune(r rune) bool { + return highSurrogateOffset <= r && r <= basicMultilingualPlaneReservedOffset +} + +func decodeUnicodeEscape(in []byte) (rune, int) { + if r, ok := decodeSingleUnicodeEscape(in); !ok { + // Invalid Unicode escape + return utf8.RuneError, -1 + } else if r <= basicMultilingualPlaneOffset && !isUTF16EncodedRune(r) { + // Valid Unicode escape in Basic Multilingual Plane + return r, 6 + } else if r2, ok := decodeSingleUnicodeEscape(in[6:]); !ok { // Note: previous decodeSingleUnicodeEscape success guarantees at least 6 bytes remain + // UTF16 "high surrogate" without manditory valid following Unicode escape for the "low surrogate" + return utf8.RuneError, -1 + } else if r2 < lowSurrogateOffset { + // Invalid UTF16 "low surrogate" + return utf8.RuneError, -1 + } else { + // Valid UTF16 surrogate pair + return combineUTF16Surrogates(r, r2), 12 + } +} + +// backslashCharEscapeTable: when '\X' is found for some byte X, it is to be replaced with backslashCharEscapeTable[X] +var backslashCharEscapeTable = [...]byte{ + '"': '"', + '\\': '\\', + '/': '/', + 'b': '\b', + 'f': '\f', + 'n': '\n', + 'r': '\r', + 't': '\t', +} + +// unescapeToUTF8 unescapes the single escape sequence starting at 'in' into 'out' and returns +// how many characters were consumed from 'in' and emitted into 'out'. +// If a valid escape sequence does not appear as a prefix of 'in', (-1, -1) to signal the error. +func unescapeToUTF8(in, out []byte) (inLen int, outLen int) { + if len(in) < 2 || in[0] != '\\' { + // Invalid escape due to insufficient characters for any escape or no initial backslash + return -1, -1 + } + + // https://tools.ietf.org/html/rfc7159#section-7 + switch e := in[1]; e { + case '"', '\\', '/', 'b', 'f', 'n', 'r', 't': + // Valid basic 2-character escapes (use lookup table) + out[0] = backslashCharEscapeTable[e] + return 2, 1 + case 'u': + // Unicode escape + if r, inLen := decodeUnicodeEscape(in); inLen == -1 { + // Invalid Unicode escape + return -1, -1 + } else { + // Valid Unicode escape; re-encode as UTF8 + outLen := utf8.EncodeRune(out, r) + return inLen, outLen + } + } + + return -1, -1 +} + +// unescape unescapes the string contained in 'in' and returns it as a slice. +// If 'in' contains no escaped characters: +// Returns 'in'. +// Else, if 'out' is of sufficient capacity (guaranteed if cap(out) >= len(in)): +// 'out' is used to build the unescaped string and is returned with no extra allocation +// Else: +// A new slice is allocated and returned. 
+func Unescape(in, out []byte) ([]byte, error) { + firstBackslash := bytes.IndexByte(in, '\\') + if firstBackslash == -1 { + return in, nil + } + + // Get a buffer of sufficient size (allocate if needed) + if cap(out) < len(in) { + out = make([]byte, len(in)) + } else { + out = out[0:len(in)] + } + + // Copy the first sequence of unescaped bytes to the output and obtain a buffer pointer (subslice) + copy(out, in[:firstBackslash]) + in = in[firstBackslash:] + buf := out[firstBackslash:] + + for len(in) > 0 { + // Unescape the next escaped character + inLen, bufLen := unescapeToUTF8(in, buf) + if inLen == -1 { + return nil, MalformedStringEscapeError + } + + in = in[inLen:] + buf = buf[bufLen:] + + // Copy everything up until the next backslash + nextBackslash := bytes.IndexByte(in, '\\') + if nextBackslash == -1 { + copy(buf, in) + buf = buf[len(in):] + break + } else { + copy(buf, in[:nextBackslash]) + buf = buf[nextBackslash:] + in = in[nextBackslash:] + } + } + + // Trim the out buffer to the amount that was actually emitted + return out[:len(out)-len(buf)], nil +} diff --git a/vendor/github.com/buger/jsonparser/fuzz.go b/vendor/github.com/buger/jsonparser/fuzz.go new file mode 100644 index 0000000000000..854bd11b2cdc3 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/fuzz.go @@ -0,0 +1,117 @@ +package jsonparser + +func FuzzParseString(data []byte) int { + r, err := ParseString(data) + if err != nil || r == "" { + return 0 + } + return 1 +} + +func FuzzEachKey(data []byte) int { + paths := [][]string{ + {"name"}, + {"order"}, + {"nested", "a"}, + {"nested", "b"}, + {"nested2", "a"}, + {"nested", "nested3", "b"}, + {"arr", "[1]", "b"}, + {"arrInt", "[3]"}, + {"arrInt", "[5]"}, + {"nested"}, + {"arr", "["}, + {"a\n", "b\n"}, + } + EachKey(data, func(idx int, value []byte, vt ValueType, err error) {}, paths...) 
+ return 1 +} + +func FuzzDelete(data []byte) int { + Delete(data, "test") + return 1 +} + +func FuzzSet(data []byte) int { + _, err := Set(data, []byte(`"new value"`), "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzObjectEach(data []byte) int { + _ = ObjectEach(data, func(key, value []byte, valueType ValueType, off int) error { + return nil + }) + return 1 +} + +func FuzzParseFloat(data []byte) int { + _, err := ParseFloat(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzParseInt(data []byte) int { + _, err := ParseInt(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzParseBool(data []byte) int { + _, err := ParseBoolean(data) + if err != nil { + return 0 + } + return 1 +} + +func FuzzTokenStart(data []byte) int { + _ = tokenStart(data) + return 1 +} + +func FuzzGetString(data []byte) int { + _, err := GetString(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetFloat(data []byte) int { + _, err := GetFloat(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetInt(data []byte) int { + _, err := GetInt(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetBoolean(data []byte) int { + _, err := GetBoolean(data, "test") + if err != nil { + return 0 + } + return 1 +} + +func FuzzGetUnsafeString(data []byte) int { + _, err := GetUnsafeString(data, "test") + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh b/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh new file mode 100644 index 0000000000000..c573b0e2d104f --- /dev/null +++ b/vendor/github.com/buger/jsonparser/oss-fuzz-build.sh @@ -0,0 +1,47 @@ +#!/bin/bash -eu + +git clone https://github.com/dvyukov/go-fuzz-corpus +zip corpus.zip go-fuzz-corpus/json/corpus/* + +cp corpus.zip $OUT/fuzzparsestring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseString fuzzparsestring + +cp corpus.zip $OUT/fuzzeachkey_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzEachKey fuzzeachkey + +cp corpus.zip $OUT/fuzzdelete_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzDelete fuzzdelete + +cp corpus.zip $OUT/fuzzset_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzSet fuzzset + +cp corpus.zip $OUT/fuzzobjecteach_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzObjectEach fuzzobjecteach + +cp corpus.zip $OUT/fuzzparsefloat_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseFloat fuzzparsefloat + +cp corpus.zip $OUT/fuzzparseint_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseInt fuzzparseint + +cp corpus.zip $OUT/fuzzparsebool_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzParseBool fuzzparsebool + +cp corpus.zip $OUT/fuzztokenstart_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzTokenStart fuzztokenstart + +cp corpus.zip $OUT/fuzzgetstring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetString fuzzgetstring + +cp corpus.zip $OUT/fuzzgetfloat_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetFloat fuzzgetfloat + +cp corpus.zip $OUT/fuzzgetint_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetInt fuzzgetint + +cp corpus.zip $OUT/fuzzgetboolean_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser FuzzGetBoolean fuzzgetboolean + +cp corpus.zip $OUT/fuzzgetunsafestring_seed_corpus.zip +compile_go_fuzzer github.com/buger/jsonparser 
FuzzGetUnsafeString fuzzgetunsafestring + diff --git a/vendor/github.com/buger/jsonparser/parser.go b/vendor/github.com/buger/jsonparser/parser.go new file mode 100644 index 0000000000000..14b80bc4838c5 --- /dev/null +++ b/vendor/github.com/buger/jsonparser/parser.go @@ -0,0 +1,1283 @@ +package jsonparser + +import ( + "bytes" + "errors" + "fmt" + "strconv" +) + +// Errors +var ( + KeyPathNotFoundError = errors.New("Key path not found") + UnknownValueTypeError = errors.New("Unknown value type") + MalformedJsonError = errors.New("Malformed JSON error") + MalformedStringError = errors.New("Value is string, but can't find closing '\"' symbol") + MalformedArrayError = errors.New("Value is array, but can't find closing ']' symbol") + MalformedObjectError = errors.New("Value looks like object, but can't find closing '}' symbol") + MalformedValueError = errors.New("Value looks like Number/Boolean/None, but can't find its end: ',' or '}' symbol") + OverflowIntegerError = errors.New("Value is number, but overflowed while parsing") + MalformedStringEscapeError = errors.New("Encountered an invalid escape sequence in a string") +) + +// How much stack space to allocate for unescaping JSON strings; if a string longer +// than this needs to be escaped, it will result in a heap allocation +const unescapeStackBufSize = 64 + +func tokenEnd(data []byte) int { + for i, c := range data { + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + return i + } + } + + return len(data) +} + +func findTokenStart(data []byte, token byte) int { + for i := len(data) - 1; i >= 0; i-- { + switch data[i] { + case token: + return i + case '[', '{': + return 0 + } + } + + return 0 +} + +func findKeyStart(data []byte, key string) (int, error) { + i := 0 + ln := len(data) + if ln > 0 && (data[0] == '{' || data[0] == '[') { + i = 1 + } + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + + if ku, err := Unescape(StringToBytes(key), stackbuf[:]); err == nil { + key = bytesToString(&ku) + } + + for i < ln { + switch data[i] { + case '"': + i++ + keyBegin := i + + strEnd, keyEscaped := stringEnd(data[i:]) + if strEnd == -1 { + break + } + i += strEnd + keyEnd := i - 1 + + valueOffset := nextToken(data[i:]) + if valueOffset == -1 { + break + } + + i += valueOffset + + // if string is a key, and key level match + k := data[keyBegin:keyEnd] + // for unescape: if there are no escape sequences, this is cheap; if there are, it is a + // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize + if keyEscaped { + if ku, err := Unescape(k, stackbuf[:]); err != nil { + break + } else { + k = ku + } + } + + if data[i] == ':' && len(key) == len(k) && bytesToString(&k) == key { + return keyBegin - 1, nil + } + + case '[': + end := blockEnd(data[i:], data[i], ']') + if end != -1 { + i = i + end + } + case '{': + end := blockEnd(data[i:], data[i], '}') + if end != -1 { + i = i + end + } + } + i++ + } + + return -1, KeyPathNotFoundError +} + +func tokenStart(data []byte) int { + for i := len(data) - 1; i >= 0; i-- { + switch data[i] { + case '\n', '\r', '\t', ',', '{', '[': + return i + } + } + + return 0 +} + +// Find position of next character which is not whitespace +func nextToken(data []byte) int { + for i, c := range data { + switch c { + case ' ', '\n', '\r', '\t': + continue + default: + return i + } + } + + return -1 +} + +// Find position of last character which is not whitespace +func lastToken(data []byte) int { + for i := len(data) - 1; 
i >= 0; i-- { + switch data[i] { + case ' ', '\n', '\r', '\t': + continue + default: + return i + } + } + + return -1 +} + +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func stringEnd(data []byte) (int, bool) { + escaped := false + for i, c := range data { + if c == '"' { + if !escaped { + return i + 1, false + } else { + j := i - 1 + for { + if j < 0 || data[j] != '\\' { + return i + 1, true // even number of backslashes + } + j-- + if j < 0 || data[j] != '\\' { + break // odd number of backslashes + } + j-- + + } + } + } else if c == '\\' { + escaped = true + } + } + + return -1, escaped +} + +// Find end of the data structure, array or object. +// For array openSym and closeSym will be '[' and ']', for object '{' and '}' +func blockEnd(data []byte, openSym byte, closeSym byte) int { + level := 0 + i := 0 + ln := len(data) + + for i < ln { + switch data[i] { + case '"': // If inside string, skip it + se, _ := stringEnd(data[i+1:]) + if se == -1 { + return -1 + } + i += se + case openSym: // If open symbol, increase level + level++ + case closeSym: // If close symbol, increase level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + return i + 1 + } + } + i++ + } + + return -1 +} + +func searchKeys(data []byte, keys ...string) int { + keyLevel := 0 + level := 0 + i := 0 + ln := len(data) + lk := len(keys) + lastMatched := true + + if lk == 0 { + return 0 + } + + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + + for i < ln { + switch data[i] { + case '"': + i++ + keyBegin := i + + strEnd, keyEscaped := stringEnd(data[i:]) + if strEnd == -1 { + return -1 + } + i += strEnd + keyEnd := i - 1 + + valueOffset := nextToken(data[i:]) + if valueOffset == -1 { + return -1 + } + + i += valueOffset + + // if string is a key + if data[i] == ':' { + if level < 1 { + return -1 + } + + key := data[keyBegin:keyEnd] + + // for unescape: if there are no escape sequences, this is cheap; if there are, it is a + // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize + var keyUnesc []byte + if !keyEscaped { + keyUnesc = key + } else if ku, err := Unescape(key, stackbuf[:]); err != nil { + return -1 + } else { + keyUnesc = ku + } + + if level <= len(keys) { + if equalStr(&keyUnesc, keys[level-1]) { + lastMatched = true + + // if key level match + if keyLevel == level-1 { + keyLevel++ + // If we found all keys in path + if keyLevel == lk { + return i + 1 + } + } + } else { + lastMatched = false + } + } else { + return -1 + } + } else { + i-- + } + case '{': + + // in case parent key is matched then only we will increase the level otherwise can directly + // can move to the end of this block + if !lastMatched { + end := blockEnd(data[i:], '{', '}') + if end == -1 { + return -1 + } + i += end - 1 + } else { + level++ + } + case '}': + level-- + if level == keyLevel { + keyLevel-- + } + case '[': + // If we want to get array element by index + if keyLevel == level && keys[level][0] == '[' { + var keyLen = len(keys[level]) + if keyLen < 3 || keys[level][0] != '[' || keys[level][keyLen-1] != ']' { + return -1 + } + aIdx, err := strconv.Atoi(keys[level][1 : keyLen-1]) + if err != nil { + return -1 + } + var curIdx int + var valueFound []byte + var valueOffset int + var curI = i + ArrayEach(data[i:], func(value []byte, dataType ValueType, offset int, err error) { + if curIdx == aIdx { + valueFound = value + valueOffset = offset + if 
dataType == String { + valueOffset = valueOffset - 2 + valueFound = data[curI+valueOffset : curI+valueOffset+len(value)+2] + } + } + curIdx += 1 + }) + + if valueFound == nil { + return -1 + } else { + subIndex := searchKeys(valueFound, keys[level+1:]...) + if subIndex < 0 { + return -1 + } + return i + valueOffset + subIndex + } + } else { + // Do not search for keys inside arrays + if arraySkip := blockEnd(data[i:], '[', ']'); arraySkip == -1 { + return -1 + } else { + i += arraySkip - 1 + } + } + case ':': // If encountered, JSON data is malformed + return -1 + } + + i++ + } + + return -1 +} + +func sameTree(p1, p2 []string) bool { + minLen := len(p1) + if len(p2) < minLen { + minLen = len(p2) + } + + for pi_1, p_1 := range p1[:minLen] { + if p2[pi_1] != p_1 { + return false + } + } + + return true +} + +func EachKey(data []byte, cb func(int, []byte, ValueType, error), paths ...[]string) int { + var x struct{} + pathFlags := make([]bool, len(paths)) + var level, pathsMatched, i int + ln := len(data) + + var maxPath int + for _, p := range paths { + if len(p) > maxPath { + maxPath = len(p) + } + } + + pathsBuf := make([]string, maxPath) + + for i < ln { + switch data[i] { + case '"': + i++ + keyBegin := i + + strEnd, keyEscaped := stringEnd(data[i:]) + if strEnd == -1 { + return -1 + } + i += strEnd + + keyEnd := i - 1 + + valueOffset := nextToken(data[i:]) + if valueOffset == -1 { + return -1 + } + + i += valueOffset + + // if string is a key, and key level match + if data[i] == ':' { + match := -1 + key := data[keyBegin:keyEnd] + + // for unescape: if there are no escape sequences, this is cheap; if there are, it is a + // bit more expensive, but causes no allocations unless len(key) > unescapeStackBufSize + var keyUnesc []byte + if !keyEscaped { + keyUnesc = key + } else { + var stackbuf [unescapeStackBufSize]byte + if ku, err := Unescape(key, stackbuf[:]); err != nil { + return -1 + } else { + keyUnesc = ku + } + } + + if maxPath >= level { + if level < 1 { + cb(-1, nil, Unknown, MalformedJsonError) + return -1 + } + + pathsBuf[level-1] = bytesToString(&keyUnesc) + for pi, p := range paths { + if len(p) != level || pathFlags[pi] || !equalStr(&keyUnesc, p[level-1]) || !sameTree(p, pathsBuf[:level]) { + continue + } + + match = pi + + pathsMatched++ + pathFlags[pi] = true + + v, dt, _, e := Get(data[i+1:]) + cb(pi, v, dt, e) + + if pathsMatched == len(paths) { + break + } + } + if pathsMatched == len(paths) { + return i + } + } + + if match == -1 { + tokenOffset := nextToken(data[i+1:]) + i += tokenOffset + + if data[i] == '{' { + blockSkip := blockEnd(data[i:], '{', '}') + i += blockSkip + 1 + } + } + + if i < ln { + switch data[i] { + case '{', '}', '[', '"': + i-- + } + } + } else { + i-- + } + case '{': + level++ + case '}': + level-- + case '[': + var ok bool + arrIdxFlags := make(map[int]struct{}) + pIdxFlags := make([]bool, len(paths)) + + if level < 0 { + cb(-1, nil, Unknown, MalformedJsonError) + return -1 + } + + for pi, p := range paths { + if len(p) < level+1 || pathFlags[pi] || p[level][0] != '[' || !sameTree(p, pathsBuf[:level]) { + continue + } + if len(p[level]) >= 2 { + aIdx, _ := strconv.Atoi(p[level][1 : len(p[level])-1]) + arrIdxFlags[aIdx] = x + pIdxFlags[pi] = true + } + } + + if len(arrIdxFlags) > 0 { + level++ + + var curIdx int + arrOff, _ := ArrayEach(data[i:], func(value []byte, dataType ValueType, offset int, err error) { + if _, ok = arrIdxFlags[curIdx]; ok { + for pi, p := range paths { + if pIdxFlags[pi] { + aIdx, _ := strconv.Atoi(p[level-1][1 : 
len(p[level-1])-1]) + + if curIdx == aIdx { + of := searchKeys(value, p[level:]...) + + pathsMatched++ + pathFlags[pi] = true + + if of != -1 { + v, dt, _, e := Get(value[of:]) + cb(pi, v, dt, e) + } + } + } + } + } + + curIdx += 1 + }) + + if pathsMatched == len(paths) { + return i + } + + i += arrOff - 1 + } else { + // Do not search for keys inside arrays + if arraySkip := blockEnd(data[i:], '[', ']'); arraySkip == -1 { + return -1 + } else { + i += arraySkip - 1 + } + } + case ']': + level-- + } + + i++ + } + + return -1 +} + +// Data types available in valid JSON data. +type ValueType int + +const ( + NotExist = ValueType(iota) + String + Number + Object + Array + Boolean + Null + Unknown +) + +func (vt ValueType) String() string { + switch vt { + case NotExist: + return "non-existent" + case String: + return "string" + case Number: + return "number" + case Object: + return "object" + case Array: + return "array" + case Boolean: + return "boolean" + case Null: + return "null" + default: + return "unknown" + } +} + +var ( + trueLiteral = []byte("true") + falseLiteral = []byte("false") + nullLiteral = []byte("null") +) + +func createInsertComponent(keys []string, setValue []byte, comma, object bool) []byte { + isIndex := string(keys[0][0]) == "[" + offset := 0 + lk := calcAllocateSpace(keys, setValue, comma, object) + buffer := make([]byte, lk, lk) + if comma { + offset += WriteToBuffer(buffer[offset:], ",") + } + if isIndex && !comma { + offset += WriteToBuffer(buffer[offset:], "[") + } else { + if object { + offset += WriteToBuffer(buffer[offset:], "{") + } + if !isIndex { + offset += WriteToBuffer(buffer[offset:], "\"") + offset += WriteToBuffer(buffer[offset:], keys[0]) + offset += WriteToBuffer(buffer[offset:], "\":") + } + } + + for i := 1; i < len(keys); i++ { + if string(keys[i][0]) == "[" { + offset += WriteToBuffer(buffer[offset:], "[") + } else { + offset += WriteToBuffer(buffer[offset:], "{\"") + offset += WriteToBuffer(buffer[offset:], keys[i]) + offset += WriteToBuffer(buffer[offset:], "\":") + } + } + offset += WriteToBuffer(buffer[offset:], string(setValue)) + for i := len(keys) - 1; i > 0; i-- { + if string(keys[i][0]) == "[" { + offset += WriteToBuffer(buffer[offset:], "]") + } else { + offset += WriteToBuffer(buffer[offset:], "}") + } + } + if isIndex && !comma { + offset += WriteToBuffer(buffer[offset:], "]") + } + if object && !isIndex { + offset += WriteToBuffer(buffer[offset:], "}") + } + return buffer +} + +func calcAllocateSpace(keys []string, setValue []byte, comma, object bool) int { + isIndex := string(keys[0][0]) == "[" + lk := 0 + if comma { + // , + lk += 1 + } + if isIndex && !comma { + // [] + lk += 2 + } else { + if object { + // { + lk += 1 + } + if !isIndex { + // "keys[0]" + lk += len(keys[0]) + 3 + } + } + + + lk += len(setValue) + for i := 1; i < len(keys); i++ { + if string(keys[i][0]) == "[" { + // [] + lk += 2 + } else { + // {"keys[i]":setValue} + lk += len(keys[i]) + 5 + } + } + + if object && !isIndex { + // } + lk += 1 + } + + return lk +} + +func WriteToBuffer(buffer []byte, str string) int { + copy(buffer, str) + return len(str) +} + +/* + +Del - Receives existing data structure, path to delete. 
+ +Returns: +`data` - return modified data + +*/ +func Delete(data []byte, keys ...string) []byte { + lk := len(keys) + if lk == 0 { + return data[:0] + } + + array := false + if len(keys[lk-1]) > 0 && string(keys[lk-1][0]) == "[" { + array = true + } + + var startOffset, keyOffset int + endOffset := len(data) + var err error + if !array { + if len(keys) > 1 { + _, _, startOffset, endOffset, err = internalGet(data, keys[:lk-1]...) + if err == KeyPathNotFoundError { + // problem parsing the data + return data + } + } + + keyOffset, err = findKeyStart(data[startOffset:endOffset], keys[lk-1]) + if err == KeyPathNotFoundError { + // problem parsing the data + return data + } + keyOffset += startOffset + _, _, _, subEndOffset, _ := internalGet(data[startOffset:endOffset], keys[lk-1]) + endOffset = startOffset + subEndOffset + tokEnd := tokenEnd(data[endOffset:]) + tokStart := findTokenStart(data[:keyOffset], ","[0]) + + if data[endOffset+tokEnd] == ","[0] { + endOffset += tokEnd + 1 + } else if data[endOffset+tokEnd] == " "[0] && len(data) > endOffset+tokEnd+1 && data[endOffset+tokEnd+1] == ","[0] { + endOffset += tokEnd + 2 + } else if data[endOffset+tokEnd] == "}"[0] && data[tokStart] == ","[0] { + keyOffset = tokStart + } + } else { + _, _, keyOffset, endOffset, err = internalGet(data, keys...) + if err == KeyPathNotFoundError { + // problem parsing the data + return data + } + + tokEnd := tokenEnd(data[endOffset:]) + tokStart := findTokenStart(data[:keyOffset], ","[0]) + + if data[endOffset+tokEnd] == ","[0] { + endOffset += tokEnd + 1 + } else if data[endOffset+tokEnd] == "]"[0] && data[tokStart] == ","[0] { + keyOffset = tokStart + } + } + + // We need to remove remaining trailing comma if we delete las element in the object + prevTok := lastToken(data[:keyOffset]) + remainedValue := data[endOffset:] + + var newOffset int + if nextToken(remainedValue) > -1 && remainedValue[nextToken(remainedValue)] == '}' && data[prevTok] == ',' { + newOffset = prevTok + } else { + newOffset = prevTok + 1 + } + + // We have to make a copy here if we don't want to mangle the original data, because byte slices are + // accessed by reference and not by value + dataCopy := make([]byte, len(data)) + copy(dataCopy, data) + data = append(dataCopy[:newOffset], dataCopy[endOffset:]...) + + return data +} + +/* + +Set - Receives existing data structure, path to set, and data to set at that key. + +Returns: +`value` - modified byte array +`err` - On any parsing error + +*/ +func Set(data []byte, setValue []byte, keys ...string) (value []byte, err error) { + // ensure keys are set + if len(keys) == 0 { + return nil, KeyPathNotFoundError + } + + _, _, startOffset, endOffset, err := internalGet(data, keys...) + if err != nil { + if err != KeyPathNotFoundError { + // problem parsing the data + return nil, err + } + // full path doesnt exist + // does any subpath exist? + var depth int + for i := range keys { + _, _, start, end, sErr := internalGet(data, keys[:i+1]...) 
+ if sErr != nil { + break + } else { + endOffset = end + startOffset = start + depth++ + } + } + comma := true + object := false + if endOffset == -1 { + firstToken := nextToken(data) + // We can't set a top-level key if data isn't an object + if firstToken < 0 || data[firstToken] != '{' { + return nil, KeyPathNotFoundError + } + // Don't need a comma if the input is an empty object + secondToken := firstToken + 1 + nextToken(data[firstToken+1:]) + if data[secondToken] == '}' { + comma = false + } + // Set the top level key at the end (accounting for any trailing whitespace) + // This assumes last token is valid like '}', could check and return error + endOffset = lastToken(data) + } + depthOffset := endOffset + if depth != 0 { + // if subpath is a non-empty object, add to it + // or if subpath is a non-empty array, add to it + if (data[startOffset] == '{' && data[startOffset+1+nextToken(data[startOffset+1:])] != '}') || + (data[startOffset] == '[' && data[startOffset+1+nextToken(data[startOffset+1:])] == '{') && keys[depth:][0][0] == 91 { + depthOffset-- + startOffset = depthOffset + // otherwise, over-write it with a new object + } else { + comma = false + object = true + } + } else { + startOffset = depthOffset + } + value = append(data[:startOffset], append(createInsertComponent(keys[depth:], setValue, comma, object), data[depthOffset:]...)...) + } else { + // path currently exists + startComponent := data[:startOffset] + endComponent := data[endOffset:] + + value = make([]byte, len(startComponent)+len(endComponent)+len(setValue)) + newEndOffset := startOffset + len(setValue) + copy(value[0:startOffset], startComponent) + copy(value[startOffset:newEndOffset], setValue) + copy(value[newEndOffset:], endComponent) + } + return value, nil +} + +func getType(data []byte, offset int) ([]byte, ValueType, int, error) { + var dataType ValueType + endOffset := offset + + // if string value + if data[offset] == '"' { + dataType = String + if idx, _ := stringEnd(data[offset+1:]); idx != -1 { + endOffset += idx + 1 + } else { + return nil, dataType, offset, MalformedStringError + } + } else if data[offset] == '[' { // if array value + dataType = Array + // break label, for stopping nested loops + endOffset = blockEnd(data[offset:], '[', ']') + + if endOffset == -1 { + return nil, dataType, offset, MalformedArrayError + } + + endOffset += offset + } else if data[offset] == '{' { // if object value + dataType = Object + // break label, for stopping nested loops + endOffset = blockEnd(data[offset:], '{', '}') + + if endOffset == -1 { + return nil, dataType, offset, MalformedObjectError + } + + endOffset += offset + } else { + // Number, Boolean or None + end := tokenEnd(data[endOffset:]) + + if end == -1 { + return nil, dataType, offset, MalformedValueError + } + + value := data[offset : endOffset+end] + + switch data[offset] { + case 't', 'f': // true or false + if bytes.Equal(value, trueLiteral) || bytes.Equal(value, falseLiteral) { + dataType = Boolean + } else { + return nil, Unknown, offset, UnknownValueTypeError + } + case 'u', 'n': // undefined or null + if bytes.Equal(value, nullLiteral) { + dataType = Null + } else { + return nil, Unknown, offset, UnknownValueTypeError + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + dataType = Number + default: + return nil, Unknown, offset, UnknownValueTypeError + } + + endOffset += end + } + return data[offset:endOffset], dataType, endOffset, nil +} + +/* +Get - Receives data structure, and key path to extract value from. 
+ +Returns: +`value` - Pointer to original data structure containing key value, or just empty slice if nothing found or error +`dataType` - Can be: `NotExist`, `String`, `Number`, `Object`, `Array`, `Boolean` or `Null` +`offset` - Offset from provided data structure where key value ends. Used mostly internally, for example for `ArrayEach` helper. +`err` - If key not found or any other parsing issue it should return error. If key not found it also sets `dataType` to `NotExist` + +Accept multiple keys to specify path to JSON value (in case of quering nested structures). +If no keys provided it will try to extract closest JSON value (simple ones or object/array), useful for reading streams or arrays, see `ArrayEach` implementation. +*/ +func Get(data []byte, keys ...string) (value []byte, dataType ValueType, offset int, err error) { + a, b, _, d, e := internalGet(data, keys...) + return a, b, d, e +} + +func internalGet(data []byte, keys ...string) (value []byte, dataType ValueType, offset, endOffset int, err error) { + if len(keys) > 0 { + if offset = searchKeys(data, keys...); offset == -1 { + return nil, NotExist, -1, -1, KeyPathNotFoundError + } + } + + // Go to closest value + nO := nextToken(data[offset:]) + if nO == -1 { + return nil, NotExist, offset, -1, MalformedJsonError + } + + offset += nO + value, dataType, endOffset, err = getType(data, offset) + if err != nil { + return value, dataType, offset, endOffset, err + } + + // Strip quotes from string values + if dataType == String { + value = value[1 : len(value)-1] + } + + return value[:len(value):len(value)], dataType, offset, endOffset, nil +} + +// ArrayEach is used when iterating arrays, accepts a callback function with the same return arguments as `Get`. +func ArrayEach(data []byte, cb func(value []byte, dataType ValueType, offset int, err error), keys ...string) (offset int, err error) { + if len(data) == 0 { + return -1, MalformedObjectError + } + + nT := nextToken(data) + if nT == -1 { + return -1, MalformedJsonError + } + + offset = nT + 1 + + if len(keys) > 0 { + if offset = searchKeys(data, keys...); offset == -1 { + return offset, KeyPathNotFoundError + } + + // Go to closest value + nO := nextToken(data[offset:]) + if nO == -1 { + return offset, MalformedJsonError + } + + offset += nO + + if data[offset] != '[' { + return offset, MalformedArrayError + } + + offset++ + } + + nO := nextToken(data[offset:]) + if nO == -1 { + return offset, MalformedJsonError + } + + offset += nO + + if data[offset] == ']' { + return offset, nil + } + + for true { + v, t, o, e := Get(data[offset:]) + + if e != nil { + return offset, e + } + + if o == 0 { + break + } + + if t != NotExist { + cb(v, t, offset+o-len(v), e) + } + + if e != nil { + break + } + + offset += o + + skipToToken := nextToken(data[offset:]) + if skipToToken == -1 { + return offset, MalformedArrayError + } + offset += skipToToken + + if data[offset] == ']' { + break + } + + if data[offset] != ',' { + return offset, MalformedArrayError + } + + offset++ + } + + return offset, nil +} + +// ObjectEach iterates over the key-value pairs of a JSON object, invoking a given callback for each such entry +func ObjectEach(data []byte, callback func(key []byte, value []byte, dataType ValueType, offset int) error, keys ...string) (err error) { + offset := 0 + + // Descend to the desired key, if requested + if len(keys) > 0 { + if off := searchKeys(data, keys...); off == -1 { + return KeyPathNotFoundError + } else { + offset = off + } + } + + // Validate and skip past opening brace + 
if off := nextToken(data[offset:]); off == -1 { + return MalformedObjectError + } else if offset += off; data[offset] != '{' { + return MalformedObjectError + } else { + offset++ + } + + // Skip to the first token inside the object, or stop if we find the ending brace + if off := nextToken(data[offset:]); off == -1 { + return MalformedJsonError + } else if offset += off; data[offset] == '}' { + return nil + } + + // Loop pre-condition: data[offset] points to what should be either the next entry's key, or the closing brace (if it's anything else, the JSON is malformed) + for offset < len(data) { + // Step 1: find the next key + var key []byte + + // Check what the the next token is: start of string, end of object, or something else (error) + switch data[offset] { + case '"': + offset++ // accept as string and skip opening quote + case '}': + return nil // we found the end of the object; stop and return success + default: + return MalformedObjectError + } + + // Find the end of the key string + var keyEscaped bool + if off, esc := stringEnd(data[offset:]); off == -1 { + return MalformedJsonError + } else { + key, keyEscaped = data[offset:offset+off-1], esc + offset += off + } + + // Unescape the string if needed + if keyEscaped { + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + if keyUnescaped, err := Unescape(key, stackbuf[:]); err != nil { + return MalformedStringEscapeError + } else { + key = keyUnescaped + } + } + + // Step 2: skip the colon + if off := nextToken(data[offset:]); off == -1 { + return MalformedJsonError + } else if offset += off; data[offset] != ':' { + return MalformedJsonError + } else { + offset++ + } + + // Step 3: find the associated value, then invoke the callback + if value, valueType, off, err := Get(data[offset:]); err != nil { + return err + } else if err := callback(key, value, valueType, offset+off); err != nil { // Invoke the callback here! + return err + } else { + offset += off + } + + // Step 4: skip over the next comma to the following token, or stop if we hit the ending brace + if off := nextToken(data[offset:]); off == -1 { + return MalformedArrayError + } else { + offset += off + switch data[offset] { + case '}': + return nil // Stop if we hit the close brace + case ',': + offset++ // Ignore the comma + default: + return MalformedObjectError + } + } + + // Skip to the next token after the comma + if off := nextToken(data[offset:]); off == -1 { + return MalformedArrayError + } else { + offset += off + } + } + + return MalformedObjectError // we shouldn't get here; it's expected that we will return via finding the ending brace +} + +// GetUnsafeString returns the value retrieved by `Get`, use creates string without memory allocation by mapping string to slice memory. It does not handle escape symbols. +func GetUnsafeString(data []byte, keys ...string) (val string, err error) { + v, _, _, e := Get(data, keys...) + + if e != nil { + return "", e + } + + return bytesToString(&v), nil +} + +// GetString returns the value retrieved by `Get`, cast to a string if possible, trying to properly handle escape and utf8 symbols +// If key data type do not match, it will return an error. +func GetString(data []byte, keys ...string) (val string, err error) { + v, t, _, e := Get(data, keys...) 
+ + if e != nil { + return "", e + } + + if t != String { + return "", fmt.Errorf("Value is not a string: %s", string(v)) + } + + // If no escapes return raw content + if bytes.IndexByte(v, '\\') == -1 { + return string(v), nil + } + + return ParseString(v) +} + +// GetFloat returns the value retrieved by `Get`, cast to a float64 if possible. +// The offset is the same as in `Get`. +// If key data type do not match, it will return an error. +func GetFloat(data []byte, keys ...string) (val float64, err error) { + v, t, _, e := Get(data, keys...) + + if e != nil { + return 0, e + } + + if t != Number { + return 0, fmt.Errorf("Value is not a number: %s", string(v)) + } + + return ParseFloat(v) +} + +// GetInt returns the value retrieved by `Get`, cast to a int64 if possible. +// If key data type do not match, it will return an error. +func GetInt(data []byte, keys ...string) (val int64, err error) { + v, t, _, e := Get(data, keys...) + + if e != nil { + return 0, e + } + + if t != Number { + return 0, fmt.Errorf("Value is not a number: %s", string(v)) + } + + return ParseInt(v) +} + +// GetBoolean returns the value retrieved by `Get`, cast to a bool if possible. +// The offset is the same as in `Get`. +// If key data type do not match, it will return error. +func GetBoolean(data []byte, keys ...string) (val bool, err error) { + v, t, _, e := Get(data, keys...) + + if e != nil { + return false, e + } + + if t != Boolean { + return false, fmt.Errorf("Value is not a boolean: %s", string(v)) + } + + return ParseBoolean(v) +} + +// ParseBoolean parses a Boolean ValueType into a Go bool (not particularly useful, but here for completeness) +func ParseBoolean(b []byte) (bool, error) { + switch { + case bytes.Equal(b, trueLiteral): + return true, nil + case bytes.Equal(b, falseLiteral): + return false, nil + default: + return false, MalformedValueError + } +} + +// ParseString parses a String ValueType into a Go string (the main parsing work is unescaping the JSON string) +func ParseString(b []byte) (string, error) { + var stackbuf [unescapeStackBufSize]byte // stack-allocated array for allocation-free unescaping of small strings + if bU, err := Unescape(b, stackbuf[:]); err != nil { + return "", MalformedValueError + } else { + return string(bU), nil + } +} + +// ParseNumber parses a Number ValueType into a Go float64 +func ParseFloat(b []byte) (float64, error) { + if v, err := parseFloat(&b); err != nil { + return 0, MalformedValueError + } else { + return v, nil + } +} + +// ParseInt parses a Number ValueType into a Go int64 +func ParseInt(b []byte) (int64, error) { + if v, ok, overflow := parseInt(b); !ok { + if overflow { + return 0, OverflowIntegerError + } + return 0, MalformedValueError + } else { + return v, nil + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 753823cc94f40..f0152af75d4c2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -470,6 +470,9 @@ github.com/beorn7/perks/quantile # github.com/bmatcuk/doublestar v1.3.4 ## explicit; go 1.12 github.com/bmatcuk/doublestar +# github.com/buger/jsonparser v1.1.1 +## explicit; go 1.13 +github.com/buger/jsonparser # github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b ## explicit github.com/c2h5oh/datasize From 7b6f0577c3277b84230f0f2deba747b01ca2b2fa Mon Sep 17 00:00:00 2001 From: Dylan Guedes Date: Mon, 6 May 2024 09:04:04 -0300 Subject: [PATCH 02/47] feat: Querier: Split gRPC client into two. 
(#12726) **What this PR does / why we need it**: Split the gRPC client used by the querier into two, one for the communication with the scheduler, the other for communicating with the query-frontend. - This change is retrocompatible: you don't have to change anything to keep existing behavior. - To configure the custom scheduler grpc client, you can use the new `query_scheduler_grpc_client` config or the new CLI flag `querier.scheduler-grpc-client` - If you'd like to configure your frontend grpc client using a better named section, you can use the new `query_frontend_grpc_client` instead of the old `grpc_client_config`. Just make sure you don't use both at the same time, it will result in an error. This work is necessary for configuring custom behavior between `querier<->scheduler` vs `querier<->frontend`. A use case is configuring mTLS when a different certificate is used by queriers, schedulers and frontends. You can only configure a single `server_name` with our current setup, making it impossible. --- docs/sources/shared/configuration.md | 27 ++++-- pkg/loki/config_wrapper.go | 23 +++++ pkg/loki/config_wrapper_test.go | 103 ++++++++++++++++++++++ pkg/querier/worker/frontend_processor.go | 2 +- pkg/querier/worker/scheduler_processor.go | 4 +- pkg/querier/worker/worker.go | 55 ++++++++---- pkg/querier/worker/worker_test.go | 36 +++++++- tools/doc-generator/main.go | 6 ++ 8 files changed, 229 insertions(+), 27 deletions(-) diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index 674bb09ff0b9d..df40267e9ae84 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -543,19 +543,19 @@ The `alibabacloud_storage_config` block configures the connection to Alibaba Clo ```yaml # Name of OSS bucket. -# CLI flag: -common.storage.oss.bucketname +# CLI flag: -.storage.oss.bucketname [bucket: | default = ""] # oss Endpoint to connect to. -# CLI flag: -common.storage.oss.endpoint +# CLI flag: -.storage.oss.endpoint [endpoint: | default = ""] # alibabacloud Access Key ID -# CLI flag: -common.storage.oss.access-key-id +# CLI flag: -.storage.oss.access-key-id [access_key_id: | default = ""] # alibabacloud Secret Access Key -# CLI flag: -common.storage.oss.secret-access-key +# CLI flag: -.storage.oss.secret-access-key [secret_access_key: | default = ""] ``` @@ -2236,10 +2236,23 @@ The `frontend_worker` configures the worker - running within the Loki querier - # CLI flag: -querier.id [id: | default = ""] -# The grpc_client block configures the gRPC client used to communicate between a -# client and server component in Loki. +# Configures the querier gRPC client used to communicate with the +# query-frontend. Shouldn't be used in conjunction with 'grpc_client_config'. +# The CLI flags prefix for this block configuration is: +# querier.frontend-grpc-client +[query_frontend_grpc_client: ] + +# Configures the querier gRPC client used to communicate with the query-frontend +# and with the query-scheduler if 'query_scheduler_grpc_client' isn't defined. +# This shouldn't be used if 'query_frontend_grpc_client' is defined. # The CLI flags prefix for this block configuration is: querier.frontend-client [grpc_client_config: ] + +# Configures the querier gRPC client used to communicate with the +# query-scheduler. If not defined, 'grpc_client_config' is used instead. 
+# The CLI flags prefix for this block configuration is: +# querier.scheduler-grpc-client +[query_scheduler_grpc_client: ] ``` ### gcs_storage_config @@ -2297,6 +2310,8 @@ The `grpc_client` block configures the gRPC client used to communicate between a - `ingester.client` - `pattern-ingester.client` - `querier.frontend-client` +- `querier.frontend-grpc-client` +- `querier.scheduler-grpc-client` - `query-scheduler.grpc-client-config` - `ruler.client` - `tsdb.shipper.index-gateway-client.grpc` diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go index a0e5fa5043d55..2a4789fb9e60f 100644 --- a/pkg/loki/config_wrapper.go +++ b/pkg/loki/config_wrapper.go @@ -125,6 +125,9 @@ func (c *ConfigWrapper) ApplyDynamicConfig() cfg.Source { applyIngesterFinalSleep(r) applyIngesterReplicationFactor(r) applyChunkRetain(r, &defaults) + if err := applyCommonQuerierWorkerGRPCConfig(r, &defaults); err != nil { + return err + } return nil } @@ -684,3 +687,23 @@ func applyChunkRetain(cfg, defaults *ConfigWrapper) { } } } + +func applyCommonQuerierWorkerGRPCConfig(cfg, defaults *ConfigWrapper) error { + if !reflect.DeepEqual(cfg.Worker.OldQueryFrontendGRPCClientConfig, defaults.Worker.OldQueryFrontendGRPCClientConfig) { + // User is using the old grpc configuration. + + if reflect.DeepEqual(cfg.Worker.NewQueryFrontendGRPCClientConfig, defaults.Worker.NewQueryFrontendGRPCClientConfig) { + // User is using the old grpc configuration only, we can just copy it to the new grpc client struct. + cfg.Worker.NewQueryFrontendGRPCClientConfig = cfg.Worker.OldQueryFrontendGRPCClientConfig + } else { + // User is using both, old and new way of configuring the grpc client, so we throw an error. + return fmt.Errorf("both `grpc_client_config` and `query_frontend_grpc_client` are set at the same time. Please use only one of them") + } + + if reflect.DeepEqual(cfg.Worker.QuerySchedulerGRPCClientConfig, defaults.Worker.QuerySchedulerGRPCClientConfig) { + // Since the scheduler grpc client is not set, we can just copy the old query frontend grpc client to the scheduler grpc client. + cfg.Worker.QuerySchedulerGRPCClientConfig = cfg.Worker.OldQueryFrontendGRPCClientConfig + } + } + return nil +} diff --git a/pkg/loki/config_wrapper_test.go b/pkg/loki/config_wrapper_test.go index bda2b8fa2596f..d010419770936 100644 --- a/pkg/loki/config_wrapper_test.go +++ b/pkg/loki/config_wrapper_test.go @@ -799,6 +799,109 @@ query_range: config, _ := testContext(configFileString, nil) assert.True(t, config.QueryRange.ResultsCacheConfig.CacheConfig.EmbeddedCache.Enabled) }) + + t.Run("querier worker grpc client behavior", func(t *testing.T) { + newConfigBothClientsSet := `--- +frontend_worker: + query_frontend_grpc_client: + tls_server_name: query-frontend + query_scheduler_grpc_client: + tls_server_name: query-scheduler +` + + oldConfig := `--- +frontend_worker: + grpc_client_config: + tls_server_name: query-frontend +` + + mixedConfig := `--- +frontend_worker: + grpc_client_config: + tls_server_name: query-frontend-old + query_frontend_grpc_client: + tls_server_name: query-frontend-new + query_scheduler_grpc_client: + tls_server_name: query-scheduler +` + t.Run("new configs are used", func(t *testing.T) { + asserts := func(config ConfigWrapper) { + require.EqualValues(t, "query-frontend", config.Worker.NewQueryFrontendGRPCClientConfig.TLS.ServerName) + require.EqualValues(t, "query-scheduler", config.Worker.QuerySchedulerGRPCClientConfig.TLS.ServerName) + // we never want to use zero values by default. 
+ require.NotEqualValues(t, 0, config.Worker.NewQueryFrontendGRPCClientConfig.MaxRecvMsgSize) + require.NotEqualValues(t, 0, config.Worker.QuerySchedulerGRPCClientConfig.MaxRecvMsgSize) + } + + yamlConfig, _, err := configWrapperFromYAML(t, newConfigBothClientsSet, nil) + require.NoError(t, err) + asserts(yamlConfig) + + // repeat the test using only cli flags. + cliFlags := []string{ + "-querier.frontend-grpc-client.tls-server-name=query-frontend", + "-querier.scheduler-grpc-client.tls-server-name=query-scheduler", + } + cliConfig, _, err := configWrapperFromYAML(t, emptyConfigString, cliFlags) + require.NoError(t, err) + asserts(cliConfig) + }) + + t.Run("old config works the same way", func(t *testing.T) { + asserts := func(config ConfigWrapper) { + require.EqualValues(t, "query-frontend", config.Worker.NewQueryFrontendGRPCClientConfig.TLS.ServerName) + require.EqualValues(t, "query-frontend", config.Worker.QuerySchedulerGRPCClientConfig.TLS.ServerName) + + // we never want to use zero values by default. + require.NotEqualValues(t, 0, config.Worker.NewQueryFrontendGRPCClientConfig.MaxRecvMsgSize) + require.NotEqualValues(t, 0, config.Worker.QuerySchedulerGRPCClientConfig.MaxRecvMsgSize) + } + + yamlConfig, _, err := configWrapperFromYAML(t, oldConfig, nil) + require.NoError(t, err) + asserts(yamlConfig) + + // repeat the test using only cli flags. + cliFlags := []string{ + "-querier.frontend-client.tls-server-name=query-frontend", + } + cliConfig, _, err := configWrapperFromYAML(t, emptyConfigString, cliFlags) + require.NoError(t, err) + asserts(cliConfig) + }) + + t.Run("mixed frontend clients throws an error", func(t *testing.T) { + _, _, err := configWrapperFromYAML(t, mixedConfig, nil) + require.Error(t, err) + + // repeat the test using only cli flags. + _, _, err = configWrapperFromYAML(t, emptyConfigString, []string{ + "-querier.frontend-client.tls-server-name=query-frontend", + "-querier.frontend-grpc-client.tls-server-name=query-frontend", + }) + require.Error(t, err) + + // repeat the test mixing the YAML with cli flags. + _, _, err = configWrapperFromYAML(t, newConfigBothClientsSet, []string{ + "-querier.frontend-client.tls-server-name=query-frontend", + }) + require.Error(t, err) + }) + + t.Run("mix correct cli flags with YAML configs", func(t *testing.T) { + config, _, err := configWrapperFromYAML(t, newConfigBothClientsSet, []string{ + "-querier.scheduler-grpc-client.tls-enabled=true", + }) + require.NoError(t, err) + + require.EqualValues(t, "query-frontend", config.Worker.NewQueryFrontendGRPCClientConfig.TLS.ServerName) + require.EqualValues(t, "query-scheduler", config.Worker.QuerySchedulerGRPCClientConfig.TLS.ServerName) + // we never want to use zero values by default. 
+ require.NotEqualValues(t, 0, config.Worker.NewQueryFrontendGRPCClientConfig.MaxRecvMsgSize) + require.NotEqualValues(t, 0, config.Worker.QuerySchedulerGRPCClientConfig.MaxRecvMsgSize) + require.True(t, config.Worker.QuerySchedulerGRPCClientConfig.TLSEnabled) + }) + }) } const defaultResulsCacheString = `--- diff --git a/pkg/querier/worker/frontend_processor.go b/pkg/querier/worker/frontend_processor.go index a0e3569359bfa..1327a30ae3190 100644 --- a/pkg/querier/worker/frontend_processor.go +++ b/pkg/querier/worker/frontend_processor.go @@ -30,7 +30,7 @@ func newFrontendProcessor(cfg Config, handler RequestHandler, log log.Logger, co log: log, handler: handler, codec: codec, - maxMessageSize: cfg.GRPCClientConfig.MaxSendMsgSize, + maxMessageSize: cfg.NewQueryFrontendGRPCClientConfig.MaxSendMsgSize, querierID: cfg.QuerierID, } } diff --git a/pkg/querier/worker/scheduler_processor.go b/pkg/querier/worker/scheduler_processor.go index 00b08219e5dbe..97f6d8f4d1df9 100644 --- a/pkg/querier/worker/scheduler_processor.go +++ b/pkg/querier/worker/scheduler_processor.go @@ -38,9 +38,9 @@ func newSchedulerProcessor(cfg Config, handler RequestHandler, log log.Logger, m log: log, handler: handler, codec: codec, - maxMessageSize: cfg.GRPCClientConfig.MaxSendMsgSize, + maxMessageSize: cfg.NewQueryFrontendGRPCClientConfig.MaxRecvMsgSize, querierID: cfg.QuerierID, - grpcConfig: cfg.GRPCClientConfig, + grpcConfig: cfg.NewQueryFrontendGRPCClientConfig, schedulerClientFactory: func(conn *grpc.ClientConn) schedulerpb.SchedulerForQuerierClient { return schedulerpb.NewSchedulerForQuerierClient(conn) }, diff --git a/pkg/querier/worker/worker.go b/pkg/querier/worker/worker.go index bc41a49d9075d..7d7b46dc814f5 100644 --- a/pkg/querier/worker/worker.go +++ b/pkg/querier/worker/worker.go @@ -30,7 +30,10 @@ type Config struct { QuerierID string `yaml:"id"` - GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` + NewQueryFrontendGRPCClientConfig grpcclient.Config `yaml:"query_frontend_grpc_client" doc:"description=Configures the querier gRPC client used to communicate with the query-frontend. Shouldn't be used in conjunction with 'grpc_client_config'."` + OldQueryFrontendGRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=Configures the querier gRPC client used to communicate with the query-frontend and with the query-scheduler if 'query_scheduler_grpc_client' isn't defined. This shouldn't be used if 'query_frontend_grpc_client' is defined."` + + QuerySchedulerGRPCClientConfig grpcclient.Config `yaml:"query_scheduler_grpc_client" doc:"description=Configures the querier gRPC client used to communicate with the query-scheduler. If not defined, 'grpc_client_config' is used instead."` } func (cfg *Config) RegisterFlags(f *flag.FlagSet) { @@ -39,14 +42,25 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.DNSLookupPeriod, "querier.dns-lookup-period", 3*time.Second, "How often to query DNS for query-frontend or query-scheduler address. Also used to determine how often to poll the scheduler-ring for addresses if the scheduler-ring is configured.") f.StringVar(&cfg.QuerierID, "querier.id", "", "Querier ID, sent to frontend service to identify requests from the same querier. Defaults to hostname.") - cfg.GRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-client", f) + // Register old client as the frontend-client flag for retro-compatibility. 
+ cfg.OldQueryFrontendGRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-client", f) + + cfg.NewQueryFrontendGRPCClientConfig.RegisterFlagsWithPrefix("querier.frontend-grpc-client", f) + cfg.QuerySchedulerGRPCClientConfig.RegisterFlagsWithPrefix("querier.scheduler-grpc-client", f) } func (cfg *Config) Validate() error { if cfg.FrontendAddress != "" && cfg.SchedulerAddress != "" { return errors.New("frontend address and scheduler address are mutually exclusive, please use only one") } - return cfg.GRPCClientConfig.Validate() + if err := cfg.NewQueryFrontendGRPCClientConfig.Validate(); err != nil { + return err + } + if err := cfg.OldQueryFrontendGRPCClientConfig.Validate(); err != nil { + return err + } + + return cfg.QuerySchedulerGRPCClientConfig.Validate() } // Handler for HTTP requests wrapped in protobuf messages. @@ -80,7 +94,6 @@ type processor interface { type querierWorker struct { *services.BasicService - cfg Config logger log.Logger processor processor @@ -92,6 +105,9 @@ type querierWorker struct { managers map[string]*processorManager metrics *Metrics + + grpcClientConfig grpcclient.Config + maxConcurrentRequests int } func NewQuerierWorker(cfg Config, rng ring.ReadRing, handler RequestHandler, logger log.Logger, reg prometheus.Registerer, codec RequestCodec) (services.Service, error) { @@ -105,16 +121,19 @@ func NewQuerierWorker(cfg Config, rng ring.ReadRing, handler RequestHandler, log metrics := NewMetrics(cfg, reg) var processor processor + var grpcCfg grpcclient.Config var servs []services.Service var address string switch { case rng != nil: level.Info(logger).Log("msg", "Starting querier worker using query-scheduler and scheduler ring for addresses") + grpcCfg = cfg.QuerySchedulerGRPCClientConfig processor, servs = newSchedulerProcessor(cfg, handler, logger, metrics, codec) case cfg.SchedulerAddress != "": level.Info(logger).Log("msg", "Starting querier worker connected to query-scheduler", "scheduler", cfg.SchedulerAddress) + grpcCfg = cfg.QuerySchedulerGRPCClientConfig address = cfg.SchedulerAddress processor, servs = newSchedulerProcessor(cfg, handler, logger, metrics, codec) @@ -122,26 +141,28 @@ func NewQuerierWorker(cfg Config, rng ring.ReadRing, handler RequestHandler, log level.Info(logger).Log("msg", "Starting querier worker connected to query-frontend", "frontend", cfg.FrontendAddress) address = cfg.FrontendAddress + grpcCfg = cfg.NewQueryFrontendGRPCClientConfig processor = newFrontendProcessor(cfg, handler, logger, codec) default: return nil, errors.New("unable to start the querier worker, need to configure one of frontend_address, scheduler_address, or a ring config in the query_scheduler config block") } - return newQuerierWorkerWithProcessor(cfg, metrics, logger, processor, address, rng, servs) + return newQuerierWorkerWithProcessor(grpcCfg, cfg.MaxConcurrent, cfg.DNSLookupPeriod, metrics, logger, processor, address, rng, servs) } -func newQuerierWorkerWithProcessor(cfg Config, metrics *Metrics, logger log.Logger, processor processor, address string, ring ring.ReadRing, servs []services.Service) (*querierWorker, error) { +func newQuerierWorkerWithProcessor(grpcCfg grpcclient.Config, maxConcReq int, dnsLookupPeriod time.Duration, metrics *Metrics, logger log.Logger, processor processor, address string, ring ring.ReadRing, servs []services.Service) (*querierWorker, error) { f := &querierWorker{ - cfg: cfg, - logger: logger, - managers: map[string]*processorManager{}, - processor: processor, - metrics: metrics, + maxConcurrentRequests: maxConcReq, + 
grpcClientConfig: grpcCfg, + logger: logger, + managers: map[string]*processorManager{}, + processor: processor, + metrics: metrics, } // Empty address is only used in tests, where individual targets are added manually. if address != "" { - w, err := util.NewDNSWatcher(address, cfg.DNSLookupPeriod, f) + w, err := util.NewDNSWatcher(address, dnsLookupPeriod, f) if err != nil { return nil, err } @@ -150,7 +171,7 @@ func newQuerierWorkerWithProcessor(cfg Config, metrics *Metrics, logger log.Logg } if ring != nil { - w, err := util.NewRingWatcher(log.With(logger, "component", "querier-scheduler-worker"), ring, cfg.DNSLookupPeriod, f) + w, err := util.NewRingWatcher(log.With(logger, "component", "querier-scheduler-worker"), ring, dnsLookupPeriod, f) if err != nil { return nil, err } @@ -245,17 +266,17 @@ func (w *querierWorker) resetConcurrency() { }() for _, m := range w.managers { - concurrency := w.cfg.MaxConcurrent / len(w.managers) + concurrency := w.maxConcurrentRequests / len(w.managers) // If max concurrency does not evenly divide into our frontends a subset will be chosen // to receive an extra connection. Frontend addresses were shuffled above so this will be a // random selection of frontends. - if index < w.cfg.MaxConcurrent%len(w.managers) { + if index < w.maxConcurrentRequests%len(w.managers) { level.Warn(w.logger).Log("msg", "max concurrency is not evenly divisible across targets, adding an extra connection", "addr", m.address) concurrency++ } - // If concurrency is 0 then MaxConcurrentRequests is less than the total number of + // If concurrency is 0 then maxConcurrentRequests is less than the total number of // frontends/schedulers. In order to prevent accidentally starving a frontend or scheduler we are just going to // always connect once to every target. This is dangerous b/c we may start exceeding LogQL // max concurrency. @@ -271,7 +292,7 @@ func (w *querierWorker) resetConcurrency() { func (w *querierWorker) connect(ctx context.Context, address string) (*grpc.ClientConn, error) { // Because we only use single long-running method, it doesn't make sense to inject user ID, send over tracing or add metrics. 
- opts, err := w.cfg.GRPCClientConfig.DialOption(nil, nil) + opts, err := w.grpcClientConfig.DialOption(nil, nil) if err != nil { return nil, err } diff --git a/pkg/querier/worker/worker_test.go b/pkg/querier/worker/worker_test.go index fb311925fb207..1633554b7a136 100644 --- a/pkg/querier/worker/worker_test.go +++ b/pkg/querier/worker/worker_test.go @@ -7,6 +7,8 @@ import ( "time" "github.com/go-kit/log" + "github.com/grafana/dskit/crypto/tls" + "github.com/grafana/dskit/grpcclient" "github.com/grafana/dskit/services" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -54,7 +56,7 @@ func TestResetConcurrency(t *testing.T) { MaxConcurrent: tt.maxConcurrent, } - w, err := newQuerierWorkerWithProcessor(cfg, NewMetrics(cfg, nil), log.NewNopLogger(), &mockProcessor{}, "", nil, nil) + w, err := newQuerierWorkerWithProcessor(cfg.QuerySchedulerGRPCClientConfig, cfg.MaxConcurrent, cfg.DNSLookupPeriod, NewMetrics(cfg, nil), log.NewNopLogger(), &mockProcessor{}, "", nil, nil) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), w)) @@ -93,3 +95,35 @@ func (m mockProcessor) processQueriesOnSingleStream(ctx context.Context, _ *grpc } func (m mockProcessor) notifyShutdown(_ context.Context, _ *grpc.ClientConn, _ string) {} + +func TestGRPCConfigBehavior(t *testing.T) { + logger := log.NewNopLogger() + + t.Run("uses separated GRPC TLS server names", func(t *testing.T) { + cfg := Config{ + SchedulerAddress: "scheduler:9095", + QuerySchedulerGRPCClientConfig: grpcclient.Config{ + TLS: tls.ClientConfig{ + ServerName: "query-scheduler", + }, + }, + NewQueryFrontendGRPCClientConfig: grpcclient.Config{ + TLS: tls.ClientConfig{ + ServerName: "query-frontend", + }, + }, + } + + qw, err := NewQuerierWorker(cfg, nil, nil, logger, nil, nil) + require.NoError(t, err) + require.NoError(t, services.StopAndAwaitTerminated(context.Background(), qw)) + + // grpc client the querier uses to talk to the scheduler, so the expected server name is "query-scheduler". + castedQw := qw.(*querierWorker) + require.Equal(t, "query-scheduler", castedQw.grpcClientConfig.TLS.ServerName) + + // grpc client the querier uses to return results to the frontend, so the expected server name is "query-frontend". 
+ sp := castedQw.processor.(*schedulerProcessor) + require.Equal(t, "query-frontend", sp.grpcConfig.TLS.ServerName) + }) +} diff --git a/tools/doc-generator/main.go b/tools/doc-generator/main.go index 24f6c82ef0cff..7648bb407f6fc 100644 --- a/tools/doc-generator/main.go +++ b/tools/doc-generator/main.go @@ -104,6 +104,12 @@ func generateBlocksMarkdown(blocks []*parse.ConfigBlock) string { return 1 } + if a.FlagsPrefix < b.FlagsPrefix { + return -1 + } + if a.FlagsPrefix < b.FlagsPrefix { + return 1 + } return 0 }) From 195132215487f10fb5ed6f6cc37070762121d9bb Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Mon, 6 May 2024 09:19:32 -0500 Subject: [PATCH 03/47] chore(blooms): records more bloom iteration stats (#12889) --- pkg/bloomgateway/bloomgateway.go | 24 +++- pkg/bloomgateway/multiplexing.go | 28 ++-- pkg/bloomgateway/processor.go | 15 ++- pkg/bloomgateway/processor_test.go | 5 + pkg/storage/bloom/v1/fuse.go | 201 ++++++++++++++++++++++++----- pkg/storage/bloom/v1/fuse_test.go | 2 + pkg/storage/bloom/v1/metrics.go | 20 +++ 7 files changed, 243 insertions(+), 52 deletions(-) diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go index b4ccd0bee075d..ab2637b91d8e7 100644 --- a/pkg/bloomgateway/bloomgateway.go +++ b/pkg/bloomgateway/bloomgateway.go @@ -63,7 +63,9 @@ import ( "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/v3/pkg/util" "github.com/grafana/loki/v3/pkg/util/constants" + util_log "github.com/grafana/loki/v3/pkg/util/log" utillog "github.com/grafana/loki/v3/pkg/util/log" + "github.com/grafana/loki/v3/pkg/util/spanlogger" ) var errGatewayUnhealthy = errors.New("bloom-gateway is unhealthy in the ring") @@ -87,7 +89,7 @@ type Gateway struct { queue *queue.RequestQueue activeUsers *util.ActiveUsersCleanupService - bloomStore bloomshipper.Store + bloomStore bloomshipper.StoreWithMetrics pendingTasks *atomic.Int64 @@ -109,7 +111,7 @@ func (l *fixedQueueLimits) MaxConsumers(_ string, _ int) int { } // New returns a new instance of the Bloom Gateway. -func New(cfg Config, store bloomshipper.Store, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) { +func New(cfg Config, store bloomshipper.StoreWithMetrics, logger log.Logger, reg prometheus.Registerer) (*Gateway, error) { utillog.WarnExperimentalUse("Bloom Gateway", logger) g := &Gateway{ cfg: cfg, @@ -203,13 +205,15 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk return nil, err } - logger := log.With(g.logger, "tenant", tenantID) - sp, ctx := opentracing.StartSpanFromContext(ctx, "bloomgateway.FilterChunkRefs") stats, ctx := ContextWithEmptyStats(ctx) + logger := spanlogger.FromContextWithFallback( + ctx, + util_log.WithContext(ctx, g.logger), + ) + defer func() { level.Info(logger).Log(stats.KVArgs()...) - sp.LogKV(stats.KVArgs()...) 
sp.Finish() }() @@ -319,6 +323,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk preFilterChunks += len(series.Refs) } + combinedRecorder := v1.NewBloomRecorder(ctx, "combined") for remaining > 0 { select { case <-ctx.Done(): @@ -330,10 +335,12 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk return nil, errors.Wrap(task.Err(), "request failed") } responses = append(responses, task.responses) + combinedRecorder.Merge(task.recorder) remaining-- } } + combinedRecorder.Report(util_log.WithContext(ctx, g.logger), g.bloomStore.BloomMetrics()) sp.LogKV("msg", "received all responses") start := time.Now() @@ -348,7 +355,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk postFilterSeries := len(filtered) - for _, group := range req.Refs { + for _, group := range filtered { postFilterChunks += len(group.Refs) } g.metrics.requestedSeries.Observe(float64(preFilterSeries)) @@ -421,7 +428,10 @@ func orderedResponsesByFP(responses [][]v1.Output) v1.Iterator[v1.Output] { // TODO(owen-d): improve perf. This can be faster with a more specialized impl // NB(owen-d): `req` is mutated in place for performance, but `responses` is not // Removals of the outputs must be sorted. -func filterChunkRefs(req *logproto.FilterChunkRefRequest, responses [][]v1.Output) []*logproto.GroupedChunkRefs { +func filterChunkRefs( + req *logproto.FilterChunkRefRequest, + responses [][]v1.Output, +) []*logproto.GroupedChunkRefs { res := make([]*logproto.GroupedChunkRefs, 0, len(req.Refs)) // dedupe outputs, merging the same series. diff --git a/pkg/bloomgateway/multiplexing.go b/pkg/bloomgateway/multiplexing.go index b3d80d4a0fb08..08afeffcbf70c 100644 --- a/pkg/bloomgateway/multiplexing.go +++ b/pkg/bloomgateway/multiplexing.go @@ -69,11 +69,15 @@ type Task struct { // log enqueue time so we can observe the time spent in the queue enqueueTime time.Time + + // recorder + recorder *v1.BloomRecorder } func newTask(ctx context.Context, tenantID string, refs seriesWithInterval, filters []syntax.LineFilterExpr, blocks []bloomshipper.BlockRef) Task { return Task{ tenant: tenantID, + recorder: v1.NewBloomRecorder(ctx, "task"), err: new(wrappedError), resCh: make(chan v1.Output), filters: filters, @@ -113,6 +117,7 @@ func (t Task) CloseWithError(err error) { // Copy returns a copy of the existing task but with a new slice of grouped chunk refs func (t Task) Copy(series []*logproto.GroupedChunkRefs) Task { return Task{ + recorder: t.recorder, tenant: t.tenant, err: t.err, resCh: t.resCh, @@ -126,22 +131,26 @@ func (t Task) Copy(series []*logproto.GroupedChunkRefs) Task { } } -func (t Task) RequestIter(tokenizer *v1.NGramTokenizer) v1.Iterator[v1.Request] { +func (t Task) RequestIter( + tokenizer *v1.NGramTokenizer, +) v1.Iterator[v1.Request] { return &requestIterator{ - series: v1.NewSliceIter(t.series), - search: v1.FiltersToBloomTest(tokenizer, t.filters...), - channel: t.resCh, - curr: v1.Request{}, + recorder: t.recorder, + series: v1.NewSliceIter(t.series), + search: v1.FiltersToBloomTest(tokenizer, t.filters...), + channel: t.resCh, + curr: v1.Request{}, } } var _ v1.Iterator[v1.Request] = &requestIterator{} type requestIterator struct { - series v1.Iterator[*logproto.GroupedChunkRefs] - search v1.BloomTest - channel chan<- v1.Output - curr v1.Request + recorder *v1.BloomRecorder + series v1.Iterator[*logproto.GroupedChunkRefs] + search v1.BloomTest + channel chan<- v1.Output + curr v1.Request } // At implements v1.Iterator. 
@@ -162,6 +171,7 @@ func (it *requestIterator) Next() bool { } group := it.series.At() it.curr = v1.Request{ + Recorder: it.recorder, Fp: model.Fingerprint(group.Fingerprint), Chks: convertToChunkRefs(group.Refs), Search: it.search, diff --git a/pkg/bloomgateway/processor.go b/pkg/bloomgateway/processor.go index 947296d5712c4..1e8452ded5d66 100644 --- a/pkg/bloomgateway/processor.go +++ b/pkg/bloomgateway/processor.go @@ -7,7 +7,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/grafana/dskit/concurrency" @@ -155,11 +154,15 @@ func (p *processor) processBlock(_ context.Context, blockQuerier *v1.BlockQuerie iters := make([]v1.PeekingIterator[v1.Request], 0, len(tasks)) for _, task := range tasks { - if sp := opentracing.SpanFromContext(task.ctx); sp != nil { - md, _ := blockQuerier.Metadata() - blk := bloomshipper.BlockRefFrom(task.tenant, task.table.String(), md) - sp.LogKV("process block", blk.String(), "series", len(task.series)) - } + // NB(owen-d): can be helpful for debugging, but is noisy + // and don't feel like threading this through a configuration + + // if sp := opentracing.SpanFromContext(task.ctx); sp != nil { + // md, _ := blockQuerier.Metadata() + // blk := bloomshipper.BlockRefFrom(task.tenant, task.table.String(), md) + // blockID := blk.String() + // sp.LogKV("process block", blockID, "series", len(task.series)) + // } it := v1.NewPeekingIter(task.RequestIter(tokenizer)) iters = append(iters, it) diff --git a/pkg/bloomgateway/processor_test.go b/pkg/bloomgateway/processor_test.go index b86dbf8006b78..f9dc847f588bd 100644 --- a/pkg/bloomgateway/processor_test.go +++ b/pkg/bloomgateway/processor_test.go @@ -15,6 +15,7 @@ import ( "go.uber.org/atomic" "github.com/grafana/loki/v3/pkg/logql/syntax" + v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/chunk/client" "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" @@ -40,6 +41,10 @@ type dummyStore struct { err error } +func (s *dummyStore) BloomMetrics() *v1.Metrics { + return v1.NewMetrics(nil) +} + func (s *dummyStore) ResolveMetas(_ context.Context, _ bloomshipper.MetaSearchParams) ([][]bloomshipper.MetaRef, []*bloomshipper.Fetcher, error) { time.Sleep(s.delay) diff --git a/pkg/storage/bloom/v1/fuse.go b/pkg/storage/bloom/v1/fuse.go index 0ee608cede8ee..ed920072b8ca0 100644 --- a/pkg/storage/bloom/v1/fuse.go +++ b/pkg/storage/bloom/v1/fuse.go @@ -1,10 +1,15 @@ package v1 import ( + "context" + "github.com/efficientgo/core/errors" "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/prometheus/common/model" + "go.uber.org/atomic" + + "github.com/grafana/loki/v3/pkg/util/spanlogger" ) type Request struct { @@ -12,6 +17,102 @@ type Request struct { Chks ChunkRefs Search BloomTest Response chan<- Output + Recorder *BloomRecorder +} + +// BloomRecorder records the results of a bloom search +func NewBloomRecorder(ctx context.Context, id string) *BloomRecorder { + return &BloomRecorder{ + ctx: ctx, + id: id, + seriesFound: atomic.NewInt64(0), + chunksFound: atomic.NewInt64(0), + seriesSkipped: atomic.NewInt64(0), + chunksSkipped: atomic.NewInt64(0), + seriesMissed: atomic.NewInt64(0), + chunksMissed: atomic.NewInt64(0), + chunksFiltered: atomic.NewInt64(0), + } +} + +type BloomRecorder struct { + ctx context.Context + id string + // exists in the bloom+queried + seriesFound, chunksFound *atomic.Int64 + // exists in 
bloom+skipped + seriesSkipped, chunksSkipped *atomic.Int64 + // not found in bloom + seriesMissed, chunksMissed *atomic.Int64 + // filtered out + chunksFiltered *atomic.Int64 +} + +func (r *BloomRecorder) Merge(other *BloomRecorder) { + r.seriesFound.Add(other.seriesFound.Load()) + r.chunksFound.Add(other.chunksFound.Load()) + r.seriesSkipped.Add(other.seriesSkipped.Load()) + r.chunksSkipped.Add(other.chunksSkipped.Load()) + r.seriesMissed.Add(other.seriesMissed.Load()) + r.chunksMissed.Add(other.chunksMissed.Load()) + r.chunksFiltered.Add(other.chunksFiltered.Load()) +} + +func (r *BloomRecorder) Report(logger log.Logger, metrics *Metrics) { + logger = spanlogger.FromContextWithFallback(r.ctx, logger) + + var ( + seriesFound = r.seriesFound.Load() + seriesSkipped = r.seriesSkipped.Load() + seriesMissed = r.seriesMissed.Load() + seriesRequested = seriesFound + seriesSkipped + seriesMissed + + chunksFound = r.chunksFound.Load() + chunksSkipped = r.chunksSkipped.Load() + chunksMissed = r.chunksMissed.Load() + chunksFiltered = r.chunksFiltered.Load() + chunksRequested = chunksFound + chunksSkipped + chunksMissed + ) + level.Debug(logger).Log( + "recorder_msg", "bloom search results", + "recorder_id", r.id, + + "recorder_series_requested", seriesRequested, + "recorder_series_found", seriesFound, + "recorder_series_skipped", seriesSkipped, + "recorder_series_missed", seriesMissed, + + "recorder_chunks_requested", chunksRequested, + "recorder_chunks_found", chunksFound, + "recorder_chunks_skipped", chunksSkipped, + "recorder_chunks_missed", chunksMissed, + "recorder_chunks_filtered", chunksFiltered, + ) + + if metrics != nil { + metrics.recorderSeries.WithLabelValues(recorderRequested).Add(float64(seriesRequested)) + metrics.recorderSeries.WithLabelValues(recorderFound).Add(float64(seriesFound)) + metrics.recorderSeries.WithLabelValues(recorderSkipped).Add(float64(seriesSkipped)) + metrics.recorderSeries.WithLabelValues(recorderMissed).Add(float64(seriesMissed)) + + metrics.recorderChunks.WithLabelValues(recorderRequested).Add(float64(chunksRequested)) + metrics.recorderChunks.WithLabelValues(recorderFound).Add(float64(chunksFound)) + metrics.recorderChunks.WithLabelValues(recorderSkipped).Add(float64(chunksSkipped)) + metrics.recorderChunks.WithLabelValues(recorderMissed).Add(float64(chunksMissed)) + metrics.recorderChunks.WithLabelValues(recorderFiltered).Add(float64(chunksFiltered)) + } +} + +func (r *BloomRecorder) record( + seriesFound, chunksFound, seriesSkipped, chunksSkipped, seriesMissed, chunksMissed, chunksFiltered int, +) { + r.seriesFound.Add(int64(seriesFound)) + r.chunksFound.Add(int64(chunksFound)) + r.seriesSkipped.Add(int64(seriesSkipped)) + r.chunksSkipped.Add(int64(chunksSkipped)) + r.seriesMissed.Add(int64(seriesMissed)) + r.chunksMissed.Add(int64(chunksMissed)) + r.chunksFiltered.Add(int64(chunksFiltered)) } // Output represents a chunk that failed to pass all searches @@ -59,8 +160,50 @@ func NewFusedQuerier(bq *BlockQuerier, inputs []PeekingIterator[Request], logger } } -func (fq *FusedQuerier) noRemovals(batch []Request, fp model.Fingerprint) { +func (fq *FusedQuerier) recordMissingFp( + batch []Request, + fp model.Fingerprint, +) { + fq.noRemovals(batch, fp, func(input Request) { + input.Recorder.record( + 0, 0, // found + 0, 0, // skipped + 1, len(input.Chks), // missed + 0, // chunks filtered + ) + }) +} + +func (fq *FusedQuerier) recordSkippedFp( + batch []Request, + fp model.Fingerprint, +) { + fq.noRemovals(batch, fp, func(input Request) { + input.Recorder.record( 
+ 0, 0, // found + 1, len(input.Chks), // skipped + 0, 0, // missed + 0, // chunks filtered + ) + }) +} + +func (fq *FusedQuerier) noRemovals( + batch []Request, + fp model.Fingerprint, + fn func(Request), +) { for _, input := range batch { + if fp != input.Fp { + // should not happen, but log just in case + level.Error(fq.logger).Log( + "msg", "fingerprint mismatch", + "expected", fp, + "actual", input.Fp, + "block", "TODO", + ) + } + fn(input) input.Response <- Output{ Fp: fp, Removals: nil, @@ -94,7 +237,7 @@ func (fq *FusedQuerier) Run() error { if series.Fingerprint != fp { // fingerprint not found, can't remove chunks level.Debug(fq.logger).Log("msg", "fingerprint not found", "fp", series.Fingerprint, "err", fq.bq.series.Err()) - fq.noRemovals(nextBatch, fp) + fq.recordMissingFp(nextBatch, fp) continue } @@ -103,51 +246,49 @@ func (fq *FusedQuerier) Run() error { if skip { // could not seek to the desired bloom, // likely because the page was too large to load - fq.noRemovals(nextBatch, fp) + fq.recordSkippedFp(nextBatch, fp) continue } if !fq.bq.blooms.Next() { // fingerprint not found, can't remove chunks level.Debug(fq.logger).Log("msg", "fingerprint not found", "fp", series.Fingerprint, "err", fq.bq.blooms.Err()) - fq.noRemovals(nextBatch, fp) + fq.recordMissingFp(nextBatch, fp) continue } bloom := fq.bq.blooms.At() // test every input against this chunk for _, input := range nextBatch { - _, inBlooms := input.Chks.Compare(series.Chunks, true) - - // First, see if the search passes the series level bloom before checking for chunks individually - if !input.Search.Matches(bloom) { - // We return all the chunks that were the intersection of the query - // because they for sure do not match the search and don't - // need to be downloaded - input.Response <- Output{ - Fp: fp, - Removals: inBlooms, - } - continue - } + missing, inBlooms := input.Chks.Compare(series.Chunks, true) - // TODO(owen-d): pool - var removals ChunkRefs + var ( + // TODO(owen-d): pool + removals ChunkRefs + // TODO(salvacorts): pool tokenBuf + tokenBuf []byte + prefixLen int + ) - // TODO(salvacorts): pool tokenBuf - var tokenBuf []byte - var prefixLen int - - for _, chk := range inBlooms { - // Get buf to concatenate the chunk and search token - tokenBuf, prefixLen = prefixedToken(schema.NGramLen(), chk, tokenBuf) - if !input.Search.MatchesWithPrefixBuf(bloom, tokenBuf, prefixLen) { - removals = append(removals, chk) - continue + // First, see if the search passes the series level bloom before checking for chunks individually + if matchedSeries := input.Search.Matches(bloom); !matchedSeries { + removals = inBlooms + } else { + for _, chk := range inBlooms { + // Get buf to concatenate the chunk and search token + tokenBuf, prefixLen = prefixedToken(schema.NGramLen(), chk, tokenBuf) + if !input.Search.MatchesWithPrefixBuf(bloom, tokenBuf, prefixLen) { + removals = append(removals, chk) + } } - // Otherwise, the chunk passed all the searches } + input.Recorder.record( + 1, len(inBlooms), // found + 0, 0, // skipped + 0, len(missing), // missed + len(removals), // filtered + ) input.Response <- Output{ Fp: fp, Removals: removals, diff --git a/pkg/storage/bloom/v1/fuse_test.go b/pkg/storage/bloom/v1/fuse_test.go index b86d6259ebfa1..a0dc23001e939 100644 --- a/pkg/storage/bloom/v1/fuse_test.go +++ b/pkg/storage/bloom/v1/fuse_test.go @@ -89,6 +89,7 @@ func TestFusedQuerier(t *testing.T) { for j := 0; j < n; j++ { idx := numSeries/nReqs*i + j reqs = append(reqs, Request{ + Recorder: 
NewBloomRecorder(context.Background(), "unknown"), Fp: data[idx].Series.Fingerprint, Chks: data[idx].Series.Chunks, Response: ch, @@ -282,6 +283,7 @@ func setupBlockForBenchmark(b *testing.B) (*BlockQuerier, [][]Request, []chan Ou idx = numSeries - 1 } reqs = append(reqs, Request{ + Recorder: NewBloomRecorder(context.Background(), "unknown"), Fp: data[idx].Series.Fingerprint, Chks: data[idx].Series.Chunks, Response: ch, diff --git a/pkg/storage/bloom/v1/metrics.go b/pkg/storage/bloom/v1/metrics.go index 700acfc05c673..4c6b4cee11326 100644 --- a/pkg/storage/bloom/v1/metrics.go +++ b/pkg/storage/bloom/v1/metrics.go @@ -28,6 +28,9 @@ type Metrics struct { pagesSkipped *prometheus.CounterVec bytesRead *prometheus.CounterVec bytesSkipped *prometheus.CounterVec + + recorderSeries *prometheus.CounterVec + recorderChunks *prometheus.CounterVec } const ( @@ -52,6 +55,12 @@ const ( bloomCreationTypeIndexed = "indexed" bloomCreationTypeSkipped = "skipped" + + recorderRequested = "requested" + recorderFound = "found" + recorderSkipped = "skipped" + recorderMissed = "missed" + recorderFiltered = "filtered" ) func NewMetrics(r prometheus.Registerer) *Metrics { @@ -148,5 +157,16 @@ func NewMetrics(r prometheus.Registerer) *Metrics { Name: "bloom_bytes_skipped_total", Help: "Number of bytes skipped during query iteration", }, []string{"type", "reason"}), + + recorderSeries: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Namespace: constants.Loki, + Name: "bloom_recorder_series_total", + Help: "Number of series reported by the bloom query recorder. Type can be requested (total), found (existed in blooms), skipped (due to page too large configurations, etc), missed (not found in blooms)", + }, []string{"type"}), + recorderChunks: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Namespace: constants.Loki, + Name: "bloom_recorder_chunks_total", + Help: "Number of chunks reported by the bloom query recorder. Type can be requested (total), found (existed in blooms), skipped (due to page too large configurations, etc), missed (not found in blooms), filtered (filtered out)", + }, []string{"type"}), } } From 738c274a5828aab4d88079c38400ddc705c0cb5d Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Mon, 6 May 2024 16:26:16 +0200 Subject: [PATCH 04/47] fix(blooms): Fix `partitionSeriesByDay` function (#12900) The function `partitionSeriesByDay` could yield empty when chunks were outside of the requested time range. This would result in requests for a time range that does not contain any chunks for a series to filter. These requests would return errors, because a subsequent `partitionSeriesByDay` for would remove that day resulting in no task. 
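
To make the failure mode concrete, the sketch below illustrates the guard this change introduces. It is a simplified, self-contained illustration only: the type name `chunkRef` and the helper `chunksForDay` are invented for the example, while the real implementation operates on `model.Time` and `logproto.GroupedChunkRefs` inside `partitionSeriesByDay`.

```go
// Minimal sketch of the day-partitioning guard; chunkRef is a simplified
// stand-in for logproto.ShortRef, not the actual Loki type.
package main

import (
	"fmt"
	"sort"
)

type chunkRef struct{ From, Through int64 } // timestamps in milliseconds

// chunksForDay returns the chunks overlapping [dayFrom, dayThrough) and a
// boolean indicating whether the series should be kept for that day at all.
// It assumes chunks are sorted by From, as in the real code.
func chunksForDay(chunks []chunkRef, dayFrom, dayThrough int64) ([]chunkRef, bool) {
	// index of the first chunk that ends at or after the day start
	min := sort.Search(len(chunks), func(i int) bool { return chunks[i].Through >= dayFrom })
	// index of the first chunk that starts at or after the day end
	max := sort.Search(len(chunks), func(i int) bool { return chunks[i].From >= dayThrough })

	// All chunks fall outside of the day: without the `min == max` condition
	// an empty group would be emitted, producing a request with nothing to filter.
	if min == len(chunks) || max == 0 || min == max {
		return nil, false
	}
	return chunks[min:max], true
}

func main() {
	const hour = int64(3600 * 1000)
	dayFrom, dayThrough := int64(0), 24*hour

	// chunks strictly before and strictly after the requested day
	chunks := []chunkRef{
		{From: -13 * hour, Through: -12 * hour},
		{From: 25 * hour, Through: 26 * hour},
	}

	if _, ok := chunksForDay(chunks, dayFrom, dayThrough); !ok {
		fmt.Println("series skipped for this day: no overlapping chunks, no task created")
	}
}
```

With the additional `min == max` check, a series whose chunks all fall outside the requested day is skipped entirely, so no empty per-day group (and therefore no failing downstream request) is produced.
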
Signed-off-by: Christian Haudum --- pkg/bloomgateway/bloomgateway.go | 2 +- pkg/bloomgateway/util.go | 11 ++------- pkg/bloomgateway/util_test.go | 42 +++++++++++++++++++++++++++++--- 3 files changed, 42 insertions(+), 13 deletions(-) diff --git a/pkg/bloomgateway/bloomgateway.go b/pkg/bloomgateway/bloomgateway.go index ab2637b91d8e7..ee0e6f9940fd2 100644 --- a/pkg/bloomgateway/bloomgateway.go +++ b/pkg/bloomgateway/bloomgateway.go @@ -274,7 +274,7 @@ func (g *Gateway) FilterChunkRefs(ctx context.Context, req *logproto.FilterChunk "series_requested", len(req.Refs), ) - if len(seriesByDay) != 1 { + if len(seriesByDay) > 1 { stats.Status = labelFailure return nil, errors.New("request time range must span exactly one day") } diff --git a/pkg/bloomgateway/util.go b/pkg/bloomgateway/util.go index bef6fcc5d4da1..9617202b948c3 100644 --- a/pkg/bloomgateway/util.go +++ b/pkg/bloomgateway/util.go @@ -2,7 +2,6 @@ package bloomgateway import ( "sort" - "time" "github.com/prometheus/common/model" "golang.org/x/exp/slices" @@ -13,13 +12,8 @@ import ( "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" ) -func getDayTime(ts model.Time) time.Time { - return ts.Time().UTC().Truncate(Day) -} - func truncateDay(ts model.Time) model.Time { - // model.minimumTick is time.Millisecond - return ts - (ts % model.Time(24*time.Hour/time.Millisecond)) + return model.TimeFromUnix(ts.Time().Truncate(Day).Unix()) } // getFromThrough assumes a list of ShortRefs sorted by From time @@ -125,7 +119,7 @@ func partitionSeriesByDay(from, through model.Time, seriesWithChunks []*logproto }) // All chunks fall outside of the range - if min == len(chunks) || max == 0 { + if min == len(chunks) || max == 0 || min == max { continue } @@ -135,7 +129,6 @@ func partitionSeriesByDay(from, through model.Time, seriesWithChunks []*logproto if chunks[max-1].Through > maxTs { maxTs = chunks[max-1].Through } - // fmt.Println("day", day, "series", series.Fingerprint, "minTs", minTs, "maxTs", maxTs) res = append(res, &logproto.GroupedChunkRefs{ Fingerprint: series.Fingerprint, diff --git a/pkg/bloomgateway/util_test.go b/pkg/bloomgateway/util_test.go index 9475853cffd34..a3f219c326efd 100644 --- a/pkg/bloomgateway/util_test.go +++ b/pkg/bloomgateway/util_test.go @@ -166,10 +166,46 @@ func TestPartitionRequest(t *testing.T) { exp []seriesWithInterval }{ - "empty": { + "no series": { inp: &logproto.FilterChunkRefRequest{ - From: ts.Add(-24 * time.Hour), - Through: ts, + From: ts.Add(-12 * time.Hour), + Through: ts.Add(12 * time.Hour), + Refs: []*logproto.GroupedChunkRefs{}, + }, + exp: []seriesWithInterval{}, + }, + + "no chunks for series": { + inp: &logproto.FilterChunkRefRequest{ + From: ts.Add(-12 * time.Hour), + Through: ts.Add(12 * time.Hour), + Refs: []*logproto.GroupedChunkRefs{ + { + Fingerprint: 0x00, + Refs: []*logproto.ShortRef{}, + }, + { + Fingerprint: 0x10, + Refs: []*logproto.ShortRef{}, + }, + }, + }, + exp: []seriesWithInterval{}, + }, + + "chunks before and after requested day": { + inp: &logproto.FilterChunkRefRequest{ + From: ts.Add(-2 * time.Hour), + Through: ts.Add(2 * time.Hour), + Refs: []*logproto.GroupedChunkRefs{ + { + Fingerprint: 0x00, + Refs: []*logproto.ShortRef{ + {From: ts.Add(-13 * time.Hour), Through: ts.Add(-12 * time.Hour)}, + {From: ts.Add(13 * time.Hour), Through: ts.Add(14 * time.Hour)}, + }, + }, + }, }, exp: []seriesWithInterval{}, }, From 7b77e310982147162777f9febfbcd98ec8a8c383 Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Mon, 6 May 2024 17:03:40 +0200 Subject: [PATCH 05/47] 
fix(blooms): Clean block directories recursively on startup (#12895) Any empty directories in the block directory cache directory should recursively be removed to avoid a lot of dangling, empty directories. Signed-off-by: Christian Haudum --- .../stores/shipper/bloomshipper/cache.go | 32 ++++++++++++ .../stores/shipper/bloomshipper/cache_test.go | 49 ++++++++++++++----- 2 files changed, 69 insertions(+), 12 deletions(-) diff --git a/pkg/storage/stores/shipper/bloomshipper/cache.go b/pkg/storage/stores/shipper/bloomshipper/cache.go index 3e08b53eac3f3..6ff6ef64948e3 100644 --- a/pkg/storage/stores/shipper/bloomshipper/cache.go +++ b/pkg/storage/stores/shipper/bloomshipper/cache.go @@ -45,6 +45,29 @@ func LoadBlocksDirIntoCache(paths []string, c Cache, logger log.Logger) error { return err.Err() } +func removeRecursively(root, path string) error { + if path == root { + // stop when reached root directory + return nil + } + + entries, err := os.ReadDir(path) + if err != nil { + // stop in case of error + return err + } + + if len(entries) == 0 { + base := filepath.Dir(path) + if err := os.RemoveAll(path); err != nil { + return err + } + return removeRecursively(root, base) + } + + return nil +} + func loadBlockDirectories(root string, logger log.Logger) (keys []string, values []BlockDirectory) { resolver := NewPrefixedResolver(root, defaultKeyResolver{}) _ = filepath.WalkDir(root, func(path string, dirEntry fs.DirEntry, e error) error { @@ -57,6 +80,15 @@ func loadBlockDirectories(root string, logger log.Logger) (keys []string, values return nil } + // Remove empty directories recursively + // filepath.WalkDir() does not support depth-first traversal, + // so this is not very efficient + err := removeRecursively(root, path) + if err != nil { + level.Warn(logger).Log("msg", "failed to remove directory", "path", path, "err", err) + return nil + } + ref, err := resolver.ParseBlockKey(key(path)) if err != nil { return nil diff --git a/pkg/storage/stores/shipper/bloomshipper/cache_test.go b/pkg/storage/stores/shipper/bloomshipper/cache_test.go index dd7a44e57cf7d..2ce48d5022ed2 100644 --- a/pkg/storage/stores/shipper/bloomshipper/cache_test.go +++ b/pkg/storage/stores/shipper/bloomshipper/cache_test.go @@ -2,6 +2,7 @@ package bloomshipper import ( "context" + "io/fs" "os" "path/filepath" "sync" @@ -66,19 +67,24 @@ func Test_LoadBlocksDirIntoCache(t *testing.T) { fp.Close() // invalid directory - _ = os.MkdirAll(filepath.Join(wd, "not/a/valid/blockdir"), 0o755) + invalidDir := "not/a/valid/blockdir" + _ = os.MkdirAll(filepath.Join(wd, invalidDir), 0o755) - // empty block directory - fn1 := "bloom/table_1/tenant/blocks/0000000000000000-000000000000ffff/0-3600000-abcd" - _ = os.MkdirAll(filepath.Join(wd, fn1), 0o755) + // empty block directories + emptyDir1 := "bloom/table_1/tenant/blocks/0000000000000000-000000000000ffff/0-3600000-abcd" + _ = os.MkdirAll(filepath.Join(wd, emptyDir1), 0o755) + emptyDir2 := "bloom/table_1/tenant/blocks/0000000000010000-000000000001ffff/0-3600000-ef01" + _ = os.MkdirAll(filepath.Join(wd, emptyDir2), 0o755) + emptyDir3 := "bloom/table_1/tenant/blocks/0000000000020000-000000000002ffff/0-3600000-2345" + _ = os.MkdirAll(filepath.Join(wd, emptyDir3), 0o755) // valid block directory - fn2 := "bloom/table_2/tenant/blocks/0000000000010000-000000000001ffff/0-3600000-abcd" - _ = os.MkdirAll(filepath.Join(wd, fn2), 0o755) - fp, _ = os.Create(filepath.Join(wd, fn2, "bloom")) - fp.Close() - fp, _ = os.Create(filepath.Join(wd, fn2, "series")) - fp.Close() + validDir := 
"bloom/table_2/tenant/blocks/0000000000010000-000000000001ffff/0-3600000-abcd" + _ = os.MkdirAll(filepath.Join(wd, validDir), 0o755) + for _, fn := range []string{"bloom", "series"} { + fp, _ = os.Create(filepath.Join(wd, validDir, fn)) + fp.Close() + } cfg := config.BlocksCacheConfig{ SoftLimit: 1 << 20, @@ -93,9 +99,28 @@ func Test_LoadBlocksDirIntoCache(t *testing.T) { require.Equal(t, 1, len(c.entries)) - key := filepath.Join(wd, fn2) + ".tar.gz" + key := filepath.Join(wd, validDir) + ".tar.gz" elem, found := c.entries[key] require.True(t, found) blockDir := elem.Value.(*Entry).Value - require.Equal(t, filepath.Join(wd, fn2), blockDir.Path) + require.Equal(t, filepath.Join(wd, validDir), blockDir.Path) + + // check cleaned directories + dirs := make([]string, 0, 6) + _ = filepath.WalkDir(wd, func(path string, dirEntry fs.DirEntry, _ error) error { + if !dirEntry.IsDir() { + return nil + } + dirs = append(dirs, path) + return nil + }) + require.Equal(t, []string{ + filepath.Join(wd), + filepath.Join(wd, "bloom/"), + filepath.Join(wd, "bloom/table_2/"), + filepath.Join(wd, "bloom/table_2/tenant/"), + filepath.Join(wd, "bloom/table_2/tenant/blocks/"), + filepath.Join(wd, "bloom/table_2/tenant/blocks/0000000000010000-000000000001ffff"), + filepath.Join(wd, "bloom/table_2/tenant/blocks/0000000000010000-000000000001ffff/0-3600000-abcd"), + }, dirs) } From db7c05c00537684c98d29b40534c2c6cfb2f5a39 Mon Sep 17 00:00:00 2001 From: Trevor Whitney Date: Mon, 6 May 2024 09:08:39 -0600 Subject: [PATCH 06/47] ci: force one time run of helm release for 5.48 (#12888) --- .github/workflows/helm-release.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/helm-release.yaml b/.github/workflows/helm-release.yaml index 1d5847e543554..b1d065aa9f694 100644 --- a/.github/workflows/helm-release.yaml +++ b/.github/workflows/helm-release.yaml @@ -4,6 +4,7 @@ on: push: branches: - main + - helm-5.48 paths: - 'production/helm/loki/Chart.yaml' From a772ed705c6506992cd1f2364b11fa60c1879f57 Mon Sep 17 00:00:00 2001 From: Travis Patterson Date: Mon, 6 May 2024 09:44:31 -0600 Subject: [PATCH 07/47] fix: Invalidate caches when pipeline wrappers are disabled (#12903) --- pkg/storage/chunk/cache/resultscache/cache.go | 2 +- .../resultscache/pipelinewrapper_keygen.go | 23 +++++++++++++ .../pipelinewrapper_keygen_test.go | 32 +++++++++++++++++++ 3 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen.go create mode 100644 pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen_test.go diff --git a/pkg/storage/chunk/cache/resultscache/cache.go b/pkg/storage/chunk/cache/resultscache/cache.go index 549e0b72983cb..d6e153cf693b0 100644 --- a/pkg/storage/chunk/cache/resultscache/cache.go +++ b/pkg/storage/chunk/cache/resultscache/cache.go @@ -87,7 +87,7 @@ func NewResultsCache( next: next, cache: c, limits: limits, - keyGen: keyGen, + keyGen: NewPipelineWrapperKeygen(keyGen), cacheGenNumberLoader: cacheGenNumberLoader, retentionEnabled: retentionEnabled, extractor: extractor, diff --git a/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen.go b/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen.go new file mode 100644 index 0000000000000..e3681d961effe --- /dev/null +++ b/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen.go @@ -0,0 +1,23 @@ +package resultscache + +import ( + "context" + "github.com/grafana/loki/v3/pkg/util/httpreq" +) + +type PipelineWrapperKeyGenerator struct { + inner KeyGenerator +} + +func 
NewPipelineWrapperKeygen(inner KeyGenerator) KeyGenerator { + return &PipelineWrapperKeyGenerator{inner: inner} +} + +func (kg *PipelineWrapperKeyGenerator) GenerateCacheKey(ctx context.Context, userID string, r Request) string { + innerKey := kg.inner.GenerateCacheKey(ctx, userID, r) + + if httpreq.ExtractHeader(ctx, httpreq.LokiDisablePipelineWrappersHeader) == "true" { + return "pipeline-disabled:" + innerKey + } + return innerKey +} diff --git a/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen_test.go b/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen_test.go new file mode 100644 index 0000000000000..621a77d859072 --- /dev/null +++ b/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen_test.go @@ -0,0 +1,32 @@ +package resultscache + +import ( + "context" + "github.com/grafana/loki/v3/pkg/util/httpreq" + "github.com/stretchr/testify/require" + "testing" +) + +func TestPipelineWrapperKeygen(t *testing.T) { + kg := &stubKeygen{key: "cache-key"} + keygen := NewPipelineWrapperKeygen(kg) + + t.Run("it does nothing if pipeline wrappers aren't disabled", func(t *testing.T) { + key := keygen.GenerateCacheKey(context.Background(), "", nil) + require.Equal(t, "cache-key", key) + }) + + t.Run("it changes the key when pipeline wrappers are disabled", func(t *testing.T) { + ctx := httpreq.InjectHeader(context.Background(), httpreq.LokiDisablePipelineWrappersHeader, "true") + key := keygen.GenerateCacheKey(ctx, "", nil) + require.Equal(t, "pipeline-disabled:cache-key", key) + }) +} + +type stubKeygen struct { + key string +} + +func (k *stubKeygen) GenerateCacheKey(_ context.Context, _ string, _ Request) string { + return k.key +} From 772616cd8f5cbac70374dd4a53f1714fb49a7a3b Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Tue, 7 May 2024 10:51:23 +0200 Subject: [PATCH 08/47] fix: Use an intermediate env variable in GH workflow (#12905) > For inline scripts, the preferred approach to handling untrusted input is to set the value of the expression to an intermediate environment variable. 
Source: https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable Signed-off-by: Christian Haudum --- .github/workflows/operator-check-prepare-release-commit.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/operator-check-prepare-release-commit.yml b/.github/workflows/operator-check-prepare-release-commit.yml index bbc27643c1abe..2099230718634 100644 --- a/.github/workflows/operator-check-prepare-release-commit.yml +++ b/.github/workflows/operator-check-prepare-release-commit.yml @@ -16,8 +16,9 @@ jobs: steps: - name: Extract release version id: pr_semver + env: + PR_TITLE: ${{ github.event.pull_request.title }} run: | - PR_TITLE="${{ github.event.pull_request.title }}" SEMVER=$(echo "$PR_TITLE" | sed -n 's/^chore( operator): community release \([0-9]\+\.[0-9]\+\.[0-9]\+\)$/\1/p') echo "semver=$SEMVER" >> $GITHUB_OUTPUT From 19fef9355fdd46911611dbec25df0f5a4e397d31 Mon Sep 17 00:00:00 2001 From: Sven Grossmann Date: Tue, 7 May 2024 13:22:18 +0200 Subject: [PATCH 09/47] feat(detectedFields): Support multiple parsers to be returned for a single field (#12899) --- pkg/logproto/logproto.pb.go | 381 ++++++++++++++-------------- pkg/logproto/logproto.proto | 2 +- pkg/querier/querier.go | 27 +- pkg/storage/detected/fields.go | 28 +- pkg/storage/detected/fields_test.go | 27 +- 5 files changed, 253 insertions(+), 212 deletions(-) diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go index ac9bd37a06186..5ba5e49c1060b 100644 --- a/pkg/logproto/logproto.pb.go +++ b/pkg/logproto/logproto.pb.go @@ -2818,7 +2818,7 @@ type DetectedField struct { Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` Type DetectedFieldType `protobuf:"bytes,2,opt,name=type,proto3,casttype=DetectedFieldType" json:"type,omitempty"` Cardinality uint64 `protobuf:"varint,3,opt,name=cardinality,proto3" json:"cardinality,omitempty"` - Parser string `protobuf:"bytes,4,opt,name=parser,proto3" json:"parser,omitempty"` + Parsers []string `protobuf:"bytes,4,rep,name=parsers,proto3" json:"parsers,omitempty"` Sketch []byte `protobuf:"bytes,5,opt,name=sketch,proto3" json:"sketch,omitempty"` } @@ -2875,11 +2875,11 @@ func (m *DetectedField) GetCardinality() uint64 { return 0 } -func (m *DetectedField) GetParser() string { +func (m *DetectedField) GetParsers() []string { if m != nil { - return m.Parser + return m.Parsers } - return "" + return nil } func (m *DetectedField) GetSketch() []byte { @@ -3105,174 +3105,174 @@ func init() { func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) } var fileDescriptor_c28a5f14f1f4c79a = []byte{ - // 2671 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0x4d, 0x8c, 0x1b, 0x57, - 0xd9, 0x63, 0x8f, 0xbd, 0xf6, 0x67, 0xef, 0x66, 0xf3, 0xd6, 0x49, 0xac, 0x4d, 0xea, 0xd9, 0x3e, - 0x41, 0x1b, 0x9a, 0x74, 0xdd, 0xa4, 0xb4, 0xa4, 0x29, 0xa5, 0xc4, 0xbb, 0xcd, 0x36, 0xe9, 0x36, - 0x4d, 0xdf, 0xa6, 0x69, 0x41, 0x54, 0xd5, 0xc4, 0x7e, 0xeb, 0x1d, 0xc5, 0x9e, 0x71, 0x66, 0x9e, - 0x9b, 0xee, 0x0d, 0x89, 0x33, 0xa2, 0x12, 0x07, 0xe0, 0x82, 0x84, 0x84, 0x04, 0x02, 0xf5, 0x82, - 0x38, 0x70, 0x40, 0x70, 0xe1, 0x50, 0x6e, 0xe5, 0x56, 0xf5, 0x60, 0xe8, 0xf6, 0x82, 0xf6, 0x54, - 0x09, 0x89, 0x43, 0x4f, 0xe8, 0xfd, 0xcd, 0xbc, 0x99, 0xb5, 0x49, 0xbd, 0x0d, 0x2a, 0xb9, 0xd8, - 0xf3, 0xbe, 0xf7, 0xbd, 0xef, 0xbd, 0xef, 0xe7, 0x7d, 0x7f, 0x33, 0x70, 0x72, 0x78, 0xbb, 
0xd7, - 0xea, 0x07, 0xbd, 0x61, 0x18, 0xb0, 0x20, 0x7e, 0x58, 0x15, 0xbf, 0xa8, 0xac, 0xc7, 0xcb, 0xf5, - 0x5e, 0xd0, 0x0b, 0x24, 0x0e, 0x7f, 0x92, 0xf3, 0xcb, 0x4e, 0x2f, 0x08, 0x7a, 0x7d, 0xda, 0x12, - 0xa3, 0x5b, 0xa3, 0xed, 0x16, 0xf3, 0x06, 0x34, 0x62, 0xee, 0x60, 0xa8, 0x10, 0x56, 0x14, 0xf5, - 0x3b, 0xfd, 0x41, 0xd0, 0xa5, 0xfd, 0x56, 0xc4, 0x5c, 0x16, 0xc9, 0x5f, 0x85, 0xb1, 0xc4, 0x31, - 0x86, 0xa3, 0x68, 0x47, 0xfc, 0x48, 0x20, 0xfe, 0xbd, 0x05, 0xc7, 0x36, 0xdd, 0x5b, 0xb4, 0x7f, - 0x23, 0xb8, 0xe9, 0xf6, 0x47, 0x34, 0x22, 0x34, 0x1a, 0x06, 0x7e, 0x44, 0xd1, 0x1a, 0x94, 0xfa, - 0x7c, 0x22, 0x6a, 0x58, 0x2b, 0x85, 0xd3, 0xd5, 0xf3, 0x67, 0x56, 0xe3, 0x23, 0x4f, 0x5c, 0x20, - 0xa1, 0xd1, 0x0b, 0x3e, 0x0b, 0x77, 0x89, 0x5a, 0xba, 0x7c, 0x13, 0xaa, 0x06, 0x18, 0x2d, 0x42, - 0xe1, 0x36, 0xdd, 0x6d, 0x58, 0x2b, 0xd6, 0xe9, 0x0a, 0xe1, 0x8f, 0xe8, 0x1c, 0x14, 0xdf, 0xe6, - 0x64, 0x1a, 0xf9, 0x15, 0xeb, 0x74, 0xf5, 0xfc, 0xc9, 0x64, 0x93, 0xd7, 0x7c, 0xef, 0xce, 0x88, - 0x8a, 0xd5, 0x6a, 0x23, 0x89, 0x79, 0x31, 0x7f, 0xc1, 0xc2, 0x67, 0xe0, 0xe8, 0x81, 0x79, 0x74, - 0x1c, 0x4a, 0x02, 0x43, 0x9e, 0xb8, 0x42, 0xd4, 0x08, 0xd7, 0x01, 0x6d, 0xb1, 0x90, 0xba, 0x03, - 0xe2, 0x32, 0x7e, 0xde, 0x3b, 0x23, 0x1a, 0x31, 0xfc, 0x32, 0x2c, 0xa5, 0xa0, 0x8a, 0xed, 0xa7, - 0xa1, 0x1a, 0x25, 0x60, 0xc5, 0x7b, 0x3d, 0x39, 0x56, 0xb2, 0x86, 0x98, 0x88, 0xf8, 0xe7, 0x16, - 0x40, 0x32, 0x87, 0x9a, 0x00, 0x72, 0xf6, 0x45, 0x37, 0xda, 0x11, 0x0c, 0xdb, 0xc4, 0x80, 0xa0, - 0xb3, 0x70, 0x34, 0x19, 0x5d, 0x0b, 0xb6, 0x76, 0xdc, 0xb0, 0x2b, 0x64, 0x60, 0x93, 0x83, 0x13, - 0x08, 0x81, 0x1d, 0xba, 0x8c, 0x36, 0x0a, 0x2b, 0xd6, 0xe9, 0x02, 0x11, 0xcf, 0x9c, 0x5b, 0x46, - 0x7d, 0xd7, 0x67, 0x0d, 0x5b, 0x88, 0x53, 0x8d, 0x38, 0x9c, 0xeb, 0x97, 0x46, 0x8d, 0xe2, 0x8a, - 0x75, 0x7a, 0x9e, 0xa8, 0x11, 0xfe, 0x77, 0x01, 0x6a, 0xaf, 0x8e, 0x68, 0xb8, 0xab, 0x04, 0x80, - 0x9a, 0x50, 0x8e, 0x68, 0x9f, 0x76, 0x58, 0x10, 0x4a, 0x8d, 0xb4, 0xf3, 0x0d, 0x8b, 0xc4, 0x30, - 0x54, 0x87, 0x62, 0xdf, 0x1b, 0x78, 0x4c, 0x1c, 0x6b, 0x9e, 0xc8, 0x01, 0xba, 0x08, 0xc5, 0x88, - 0xb9, 0x21, 0x13, 0x67, 0xa9, 0x9e, 0x5f, 0x5e, 0x95, 0x86, 0xb9, 0xaa, 0x0d, 0x73, 0xf5, 0x86, - 0x36, 0xcc, 0x76, 0xf9, 0xfd, 0xb1, 0x93, 0x7b, 0xf7, 0xef, 0x8e, 0x45, 0xe4, 0x12, 0xf4, 0x34, - 0x14, 0xa8, 0xdf, 0x15, 0xe7, 0xfd, 0xbc, 0x2b, 0xf9, 0x02, 0x74, 0x0e, 0x2a, 0x5d, 0x2f, 0xa4, - 0x1d, 0xe6, 0x05, 0xbe, 0xe0, 0x6a, 0xe1, 0xfc, 0x52, 0xa2, 0x91, 0x75, 0x3d, 0x45, 0x12, 0x2c, - 0x74, 0x16, 0x4a, 0x11, 0x17, 0x5d, 0xd4, 0x98, 0xe3, 0xb6, 0xd0, 0xae, 0xef, 0x8f, 0x9d, 0x45, - 0x09, 0x39, 0x1b, 0x0c, 0x3c, 0x46, 0x07, 0x43, 0xb6, 0x4b, 0x14, 0x0e, 0x7a, 0x0c, 0xe6, 0xba, - 0xb4, 0x4f, 0xb9, 0xc2, 0xcb, 0x42, 0xe1, 0x8b, 0x06, 0x79, 0x31, 0x41, 0x34, 0x02, 0x7a, 0x13, - 0xec, 0x61, 0xdf, 0xf5, 0x1b, 0x15, 0xc1, 0xc5, 0x42, 0x82, 0x78, 0xbd, 0xef, 0xfa, 0xed, 0x67, - 0x3e, 0x1a, 0x3b, 0x4f, 0xf5, 0x3c, 0xb6, 0x33, 0xba, 0xb5, 0xda, 0x09, 0x06, 0xad, 0x5e, 0xe8, - 0x6e, 0xbb, 0xbe, 0xdb, 0xea, 0x07, 0xb7, 0xbd, 0xd6, 0xdb, 0x4f, 0xb6, 0xf8, 0x1d, 0xbc, 0x33, - 0xa2, 0xa1, 0x47, 0xc3, 0x16, 0x27, 0xb3, 0x2a, 0x54, 0xc2, 0x97, 0x12, 0x41, 0x16, 0x5d, 0xe5, - 0xf6, 0x17, 0x84, 0x74, 0x6d, 0x67, 0xe4, 0xdf, 0x8e, 0x1a, 0x20, 0x76, 0x39, 0x91, 0xec, 0x22, - 0xe0, 0x84, 0x6e, 0x6f, 0x84, 0xc1, 0x68, 0xd8, 0x3e, 0xb2, 0x3f, 0x76, 0x4c, 0x7c, 0x62, 0x0e, - 0xae, 0xda, 0xe5, 0xd2, 0xe2, 0x1c, 0x7e, 0xaf, 0x00, 0x68, 0xcb, 0x1d, 0x0c, 0xfb, 0x74, 0x26, - 0xf5, 0xc7, 0x8a, 0xce, 0x1f, 0x5a, 0xd1, 0x85, 0x59, 0x15, 0x9d, 0x68, 0xcd, 0x9e, 0x4d, 0x6b, - 0xc5, 0xcf, 0xab, 
0xb5, 0xd2, 0xff, 0xbd, 0xd6, 0x70, 0x03, 0x6c, 0x4e, 0x99, 0x3b, 0xcb, 0xd0, - 0xbd, 0x2b, 0x74, 0x53, 0x23, 0xfc, 0x11, 0x6f, 0x42, 0x49, 0xf2, 0x85, 0x96, 0xb3, 0xca, 0x4b, - 0xdf, 0xdb, 0x44, 0x71, 0x05, 0xad, 0x92, 0xc5, 0x44, 0x25, 0x05, 0x21, 0x6c, 0xfc, 0x47, 0x0b, - 0xe6, 0x95, 0x45, 0x28, 0xdf, 0x77, 0x0b, 0xe6, 0xa4, 0xef, 0xd1, 0x7e, 0xef, 0x44, 0xd6, 0xef, - 0x5d, 0xea, 0xba, 0x43, 0x46, 0xc3, 0x76, 0xeb, 0xfd, 0xb1, 0x63, 0x7d, 0x34, 0x76, 0x1e, 0x9d, - 0x26, 0x34, 0x1d, 0x6b, 0xb4, 0xbf, 0xd4, 0x84, 0xd1, 0x19, 0x71, 0x3a, 0x16, 0x29, 0xb3, 0x3a, - 0xb2, 0x2a, 0x43, 0xd4, 0x15, 0xbf, 0x47, 0x23, 0x4e, 0xd9, 0xe6, 0x16, 0x41, 0x24, 0x0e, 0x67, - 0xf3, 0xae, 0x1b, 0xfa, 0x9e, 0xdf, 0x8b, 0x1a, 0x05, 0xe1, 0xd3, 0xe3, 0x31, 0xfe, 0xa9, 0x05, - 0x4b, 0x29, 0xb3, 0x56, 0x4c, 0x5c, 0x80, 0x52, 0xc4, 0x35, 0xa5, 0x79, 0x30, 0x8c, 0x62, 0x4b, - 0xc0, 0xdb, 0x0b, 0xea, 0xf0, 0x25, 0x39, 0x26, 0x0a, 0xff, 0xfe, 0x1d, 0xed, 0x2f, 0x16, 0xd4, - 0x44, 0x60, 0xd2, 0x77, 0x0d, 0x81, 0xed, 0xbb, 0x03, 0xaa, 0x54, 0x25, 0x9e, 0x8d, 0x68, 0xc5, - 0xb7, 0x2b, 0xeb, 0x68, 0x35, 0xab, 0x83, 0xb5, 0x0e, 0xed, 0x60, 0xad, 0xe4, 0xde, 0xd5, 0xa1, - 0xc8, 0xcd, 0x7b, 0x57, 0x38, 0xd7, 0x0a, 0x91, 0x03, 0xfc, 0x28, 0xcc, 0x2b, 0x2e, 0x94, 0x68, - 0xa7, 0x05, 0xd8, 0x01, 0x94, 0xa4, 0x26, 0xd0, 0x57, 0xa0, 0x12, 0x27, 0x26, 0x82, 0xdb, 0x42, - 0xbb, 0xb4, 0x3f, 0x76, 0xf2, 0x2c, 0x22, 0xc9, 0x04, 0x72, 0xcc, 0xa0, 0x6f, 0xb5, 0x2b, 0xfb, - 0x63, 0x47, 0x02, 0x54, 0x88, 0x47, 0xa7, 0xc0, 0xde, 0xe1, 0x71, 0x93, 0x8b, 0xc0, 0x6e, 0x97, - 0xf7, 0xc7, 0x8e, 0x18, 0x13, 0xf1, 0x8b, 0x37, 0xa0, 0xb6, 0x49, 0x7b, 0x6e, 0x67, 0x57, 0x6d, - 0x5a, 0xd7, 0xe4, 0xf8, 0x86, 0x96, 0xa6, 0xf1, 0x30, 0xd4, 0xe2, 0x1d, 0xdf, 0x1a, 0x44, 0xea, - 0x36, 0x54, 0x63, 0xd8, 0xcb, 0x11, 0xfe, 0x99, 0x05, 0xca, 0x06, 0x10, 0x36, 0xb2, 0x1d, 0xee, - 0x0b, 0x61, 0x7f, 0xec, 0x28, 0x88, 0x4e, 0x66, 0xd0, 0xb3, 0x30, 0x17, 0x89, 0x1d, 0x39, 0xb1, - 0xac, 0x69, 0x89, 0x89, 0xf6, 0x11, 0x6e, 0x22, 0xfb, 0x63, 0x47, 0x23, 0x12, 0xfd, 0x80, 0x56, - 0x53, 0x09, 0x81, 0x64, 0x6c, 0x61, 0x7f, 0xec, 0x18, 0x50, 0x33, 0x41, 0xc0, 0x9f, 0x59, 0x50, - 0xbd, 0xe1, 0x7a, 0xb1, 0x09, 0x35, 0xb4, 0x8a, 0x12, 0x5f, 0x2d, 0x01, 0xdc, 0x12, 0xbb, 0xb4, - 0xef, 0xee, 0x5e, 0x0e, 0x42, 0x41, 0x77, 0x9e, 0xc4, 0xe3, 0x24, 0x86, 0xdb, 0x13, 0x63, 0x78, - 0x71, 0x76, 0xd7, 0xfe, 0xbf, 0x75, 0xa4, 0x57, 0xed, 0x72, 0x7e, 0xb1, 0x80, 0xdf, 0xb3, 0xa0, - 0x26, 0x99, 0x57, 0x96, 0xf7, 0x3d, 0x28, 0x49, 0xd9, 0x08, 0xf6, 0xff, 0x8b, 0x63, 0x3a, 0x33, - 0x8b, 0x53, 0x52, 0x34, 0xd1, 0xf3, 0xb0, 0xd0, 0x0d, 0x83, 0xe1, 0x90, 0x76, 0xb7, 0x94, 0xfb, - 0xcb, 0x67, 0xdd, 0xdf, 0xba, 0x39, 0x4f, 0x32, 0xe8, 0xf8, 0xaf, 0x16, 0xcc, 0x2b, 0x67, 0xa2, - 0xd4, 0x15, 0x8b, 0xd8, 0x3a, 0x74, 0xf4, 0xcc, 0xcf, 0x1a, 0x3d, 0x8f, 0x43, 0xa9, 0xc7, 0xe3, - 0x8b, 0x76, 0x48, 0x6a, 0x34, 0x5b, 0x54, 0xc5, 0x57, 0x61, 0x41, 0xb3, 0x32, 0xc5, 0xa3, 0x2e, - 0x67, 0x3d, 0xea, 0x95, 0x2e, 0xf5, 0x99, 0xb7, 0xed, 0xc5, 0x3e, 0x52, 0xe1, 0xe3, 0x1f, 0x59, - 0xb0, 0x98, 0x45, 0x41, 0xeb, 0x99, 0xc2, 0xe2, 0x91, 0xe9, 0xe4, 0xcc, 0x9a, 0x42, 0x93, 0x56, - 0x95, 0xc5, 0x53, 0xf7, 0xaa, 0x2c, 0xea, 0xa6, 0x93, 0xa9, 0x28, 0xaf, 0x80, 0x7f, 0x62, 0xc1, - 0x7c, 0x4a, 0x97, 0xe8, 0x02, 0xd8, 0xdb, 0x61, 0x30, 0x98, 0x49, 0x51, 0x62, 0x05, 0xfa, 0x3a, - 0xe4, 0x59, 0x30, 0x93, 0x9a, 0xf2, 0x2c, 0xe0, 0x5a, 0x52, 0xec, 0x17, 0x64, 0xde, 0x2e, 0x47, - 0xf8, 0x29, 0xa8, 0x08, 0x86, 0xae, 0xbb, 0x5e, 0x38, 0x31, 0x60, 0x4c, 0x66, 0xe8, 0x59, 0x38, - 0x22, 0x9d, 0xe1, 0xe4, 0xc5, 0xb5, 0x49, 
0x8b, 0x6b, 0x7a, 0xf1, 0x49, 0x28, 0x8a, 0xa4, 0x83, - 0x2f, 0xe9, 0xba, 0xcc, 0xd5, 0x4b, 0xf8, 0x33, 0x3e, 0x06, 0x4b, 0xfc, 0x0e, 0xd2, 0x30, 0x5a, - 0x0b, 0x46, 0x3e, 0xd3, 0x75, 0xd3, 0x59, 0xa8, 0xa7, 0xc1, 0xca, 0x4a, 0xea, 0x50, 0xec, 0x70, - 0x80, 0xa0, 0x31, 0x4f, 0xe4, 0x00, 0xff, 0xd2, 0x02, 0xb4, 0x41, 0x99, 0xd8, 0xe5, 0xca, 0x7a, - 0x7c, 0x3d, 0x96, 0xa1, 0x3c, 0x70, 0x59, 0x67, 0x87, 0x86, 0x91, 0xce, 0x5f, 0xf4, 0xf8, 0xcb, - 0x48, 0x3c, 0xf1, 0x39, 0x58, 0x4a, 0x9d, 0x52, 0xf1, 0xb4, 0x0c, 0xe5, 0x8e, 0x82, 0xa9, 0x90, - 0x17, 0x8f, 0xf1, 0xef, 0xf2, 0x50, 0xd6, 0x69, 0x1d, 0x3a, 0x07, 0xd5, 0x6d, 0xcf, 0xef, 0xd1, - 0x70, 0x18, 0x7a, 0x4a, 0x04, 0xb6, 0x4c, 0xf3, 0x0c, 0x30, 0x31, 0x07, 0xe8, 0x71, 0x98, 0x1b, - 0x45, 0x34, 0x7c, 0xcb, 0x93, 0x37, 0xbd, 0xd2, 0xae, 0xef, 0x8d, 0x9d, 0xd2, 0x6b, 0x11, 0x0d, - 0xaf, 0xac, 0xf3, 0xe0, 0x33, 0x12, 0x4f, 0x44, 0xfe, 0x77, 0xd1, 0x4b, 0xca, 0x4c, 0x45, 0x02, - 0xd7, 0xfe, 0x06, 0x3f, 0x7e, 0xc6, 0xd5, 0x0d, 0xc3, 0x60, 0x40, 0xd9, 0x0e, 0x1d, 0x45, 0xad, - 0x4e, 0x30, 0x18, 0x04, 0x7e, 0x4b, 0x74, 0x02, 0x04, 0xd3, 0x3c, 0x82, 0xf2, 0xe5, 0xca, 0x72, - 0x6f, 0xc0, 0x1c, 0xdb, 0x09, 0x83, 0x51, 0x6f, 0x47, 0x04, 0x86, 0x42, 0xfb, 0xe2, 0xec, 0xf4, - 0x34, 0x05, 0xa2, 0x1f, 0xd0, 0xc3, 0x5c, 0x5a, 0xb4, 0x73, 0x3b, 0x1a, 0x0d, 0x64, 0xed, 0xd9, - 0x2e, 0xee, 0x8f, 0x1d, 0xeb, 0x71, 0x12, 0x83, 0xf1, 0x25, 0x98, 0x4f, 0xa5, 0xc2, 0xe8, 0x09, - 0xb0, 0x43, 0xba, 0xad, 0x5d, 0x01, 0x3a, 0x98, 0x31, 0xcb, 0xe8, 0xcf, 0x71, 0x88, 0xf8, 0xc5, - 0x3f, 0xcc, 0x83, 0x63, 0x54, 0xfd, 0x97, 0x83, 0xf0, 0x65, 0xca, 0x42, 0xaf, 0x73, 0xcd, 0x1d, - 0x50, 0x6d, 0x5e, 0x0e, 0x54, 0x07, 0x02, 0xf8, 0x96, 0x71, 0x8b, 0x60, 0x10, 0xe3, 0xa1, 0x87, - 0x00, 0xc4, 0xb5, 0x93, 0xf3, 0xf2, 0x42, 0x55, 0x04, 0x44, 0x4c, 0xaf, 0xa5, 0x84, 0xdd, 0x9a, - 0x51, 0x38, 0x4a, 0xc8, 0x57, 0xb2, 0x42, 0x9e, 0x99, 0x4e, 0x2c, 0x59, 0xf3, 0xba, 0x14, 0xd3, - 0xd7, 0x05, 0xff, 0xcd, 0x82, 0xe6, 0xa6, 0x3e, 0xf9, 0x21, 0xc5, 0xa1, 0xf9, 0xcd, 0xdf, 0x27, - 0x7e, 0x0b, 0x5f, 0x8c, 0x5f, 0xdc, 0x04, 0xd8, 0xf4, 0x7c, 0x7a, 0xd9, 0xeb, 0x33, 0x1a, 0x4e, - 0x28, 0x84, 0x7e, 0x5c, 0x48, 0xbc, 0x0a, 0xa1, 0xdb, 0x9a, 0xcf, 0x35, 0xc3, 0x95, 0xdf, 0x0f, - 0x36, 0xf2, 0xf7, 0x51, 0x6d, 0x85, 0x8c, 0x97, 0xf3, 0x61, 0x6e, 0x5b, 0xb0, 0x27, 0xa3, 0x72, - 0xaa, 0xc7, 0x94, 0xf0, 0xde, 0xfe, 0x96, 0xda, 0xfc, 0xe9, 0x7b, 0x24, 0x55, 0xa2, 0xf3, 0xd7, - 0x8a, 0x76, 0x7d, 0xe6, 0xbe, 0x63, 0xac, 0x27, 0x7a, 0x13, 0xe4, 0xaa, 0xbc, 0xad, 0x38, 0x31, - 0x6f, 0x7b, 0x4e, 0x6d, 0xf3, 0x45, 0x72, 0x37, 0xfc, 0x5c, 0xe2, 0x44, 0x85, 0x52, 0x94, 0x13, - 0x7d, 0xe4, 0x5e, 0x57, 0x5c, 0x5d, 0xec, 0x3f, 0x59, 0xb0, 0xb8, 0x41, 0x59, 0x3a, 0x8f, 0x7a, - 0x80, 0x54, 0x8a, 0x5f, 0x84, 0xa3, 0xc6, 0xf9, 0x15, 0xf7, 0x4f, 0x66, 0x92, 0xa7, 0x63, 0x09, - 0xff, 0x57, 0xfc, 0x2e, 0x7d, 0x47, 0xd5, 0xa4, 0xe9, 0xbc, 0xe9, 0x3a, 0x54, 0x8d, 0x49, 0x74, - 0x29, 0x93, 0x31, 0x2d, 0x65, 0x5a, 0xb1, 0x3c, 0xea, 0xb7, 0xeb, 0x8a, 0x27, 0x59, 0x79, 0xaa, - 0x7c, 0x38, 0xce, 0x2e, 0xb6, 0x00, 0x09, 0x75, 0x09, 0xb2, 0x66, 0x7c, 0x13, 0xd0, 0x97, 0xe2, - 0xd4, 0x29, 0x1e, 0xa3, 0x87, 0xc1, 0x0e, 0x83, 0xbb, 0x3a, 0x15, 0x9e, 0x4f, 0xb6, 0x24, 0xc1, - 0x5d, 0x22, 0xa6, 0xf0, 0xb3, 0x50, 0x20, 0xc1, 0x5d, 0xd4, 0x04, 0x08, 0x5d, 0xbf, 0x47, 0x6f, - 0xc6, 0x45, 0x58, 0x8d, 0x18, 0x90, 0x29, 0xb9, 0xc7, 0x1a, 0x1c, 0x35, 0x4f, 0x24, 0xd5, 0xbd, - 0x0a, 0x73, 0xaf, 0x8e, 0x4c, 0x71, 0xd5, 0x33, 0xe2, 0x92, 0xb5, 0xbe, 0x46, 0xe2, 0x36, 0x03, - 0x09, 0x1c, 0x9d, 0x82, 0x0a, 0x73, 0x6f, 0xf5, 0xe9, 0xb5, 0xc4, 
0xcd, 0x25, 0x00, 0x3e, 0xcb, - 0xeb, 0xc7, 0x9b, 0x46, 0x12, 0x95, 0x00, 0xd0, 0x63, 0xb0, 0x98, 0x9c, 0xf9, 0x7a, 0x48, 0xb7, - 0xbd, 0x77, 0x84, 0x86, 0x6b, 0xe4, 0x00, 0x1c, 0x9d, 0x86, 0x23, 0x09, 0x6c, 0x4b, 0x24, 0x2b, - 0xb6, 0x40, 0xcd, 0x82, 0xb9, 0x6c, 0x04, 0xbb, 0x2f, 0xdc, 0x19, 0xb9, 0x7d, 0x71, 0xf9, 0x6a, - 0xc4, 0x80, 0xe0, 0x3f, 0x5b, 0x70, 0x54, 0xaa, 0x9a, 0xb9, 0xec, 0x81, 0xb4, 0xfa, 0x5f, 0x59, - 0x80, 0x4c, 0x0e, 0x94, 0x69, 0x7d, 0xd5, 0xec, 0x25, 0xf1, 0x6c, 0xa8, 0x2a, 0xca, 0x62, 0x09, - 0x4a, 0xda, 0x41, 0x18, 0x4a, 0x1d, 0xd9, 0x33, 0x13, 0xcd, 0x6f, 0x59, 0x77, 0x4b, 0x08, 0x51, - 0xff, 0xc8, 0x81, 0xe2, 0xad, 0x5d, 0x46, 0x23, 0x55, 0x35, 0x8b, 0x76, 0x81, 0x00, 0x10, 0xf9, - 0xc7, 0xf7, 0xa2, 0x3e, 0x13, 0x56, 0x63, 0x27, 0x7b, 0x29, 0x10, 0xd1, 0x0f, 0xf8, 0xb7, 0x79, - 0x98, 0xbf, 0x19, 0xf4, 0x47, 0x49, 0x60, 0x7c, 0x90, 0x02, 0x46, 0xaa, 0x94, 0x2f, 0xea, 0x52, - 0x1e, 0x81, 0x1d, 0x31, 0x3a, 0x14, 0x96, 0x55, 0x20, 0xe2, 0x19, 0x61, 0xa8, 0x31, 0x37, 0xec, - 0x51, 0x26, 0x0b, 0xa4, 0x46, 0x49, 0x64, 0xae, 0x29, 0x18, 0x5a, 0x81, 0xaa, 0xdb, 0xeb, 0x85, - 0xb4, 0xe7, 0x32, 0xda, 0xde, 0x6d, 0xcc, 0x89, 0xcd, 0x4c, 0x10, 0x7e, 0x03, 0x16, 0xb4, 0xb0, - 0x94, 0x4a, 0x9f, 0x80, 0xb9, 0xb7, 0x05, 0x64, 0x42, 0x6b, 0x4d, 0xa2, 0x2a, 0x37, 0xa6, 0xd1, - 0xd2, 0xaf, 0x10, 0xf4, 0x99, 0xf1, 0x55, 0x28, 0x49, 0x74, 0x74, 0xca, 0x2c, 0x73, 0x64, 0xa6, - 0xc7, 0xc7, 0xaa, 0x66, 0xc1, 0x50, 0x92, 0x84, 0x94, 0xe2, 0x85, 0x6d, 0x48, 0x08, 0x51, 0xff, - 0xf8, 0x5f, 0x16, 0x1c, 0x5b, 0xa7, 0x8c, 0x76, 0x18, 0xed, 0x5e, 0xf6, 0x68, 0xbf, 0xfb, 0xa5, - 0x56, 0xe0, 0x71, 0x1f, 0xad, 0x60, 0xf4, 0xd1, 0xb8, 0xdf, 0xe9, 0x7b, 0x3e, 0xdd, 0x34, 0x1a, - 0x31, 0x09, 0x80, 0x7b, 0x88, 0x6d, 0x7e, 0x70, 0x39, 0x2d, 0xdf, 0xd9, 0x18, 0x90, 0x58, 0xc3, - 0xa5, 0x44, 0xc3, 0xf8, 0x07, 0x16, 0x1c, 0xcf, 0x72, 0xad, 0x94, 0xd4, 0x82, 0x92, 0x58, 0x3c, - 0xa1, 0x85, 0x9b, 0x5a, 0x41, 0x14, 0x1a, 0xba, 0x90, 0xda, 0x5f, 0xbc, 0xeb, 0x69, 0x37, 0xf6, - 0xc7, 0x4e, 0x3d, 0x81, 0x1a, 0x5d, 0x02, 0x03, 0x17, 0xff, 0x81, 0xd7, 0xd2, 0x26, 0x4d, 0xa1, - 0x6f, 0x6e, 0x5f, 0xca, 0xf7, 0xca, 0x01, 0xfa, 0x1a, 0xd8, 0x6c, 0x77, 0xa8, 0x5c, 0x6e, 0xfb, - 0xd8, 0x67, 0x63, 0xe7, 0x68, 0x6a, 0xd9, 0x8d, 0xdd, 0x21, 0x25, 0x02, 0x85, 0x9b, 0x65, 0xc7, - 0x0d, 0xbb, 0x9e, 0xef, 0xf6, 0x3d, 0x26, 0xc5, 0x68, 0x13, 0x13, 0x24, 0x5e, 0x6f, 0xb9, 0x61, - 0x44, 0x43, 0xfd, 0xda, 0x4b, 0x8e, 0x44, 0x93, 0xe3, 0x36, 0x65, 0x9d, 0x1d, 0xe9, 0x64, 0x55, - 0x93, 0x43, 0x40, 0x52, 0x4d, 0x0e, 0x01, 0xc1, 0xbf, 0x30, 0xcc, 0x46, 0xde, 0x88, 0x43, 0x9a, - 0x8d, 0x75, 0x68, 0xb3, 0xb1, 0xee, 0x61, 0x36, 0xf8, 0x3b, 0x89, 0x8e, 0xf5, 0x11, 0x95, 0x8e, - 0x9f, 0x87, 0x85, 0x6e, 0x6a, 0x66, 0xba, 0xae, 0x65, 0x03, 0x37, 0x83, 0x8e, 0x37, 0x12, 0xc5, - 0x09, 0xc8, 0x14, 0xc5, 0x65, 0xb4, 0x91, 0x3f, 0xa0, 0x8d, 0xc7, 0x1e, 0x81, 0x4a, 0xfc, 0xfa, - 0x0d, 0x55, 0x61, 0xee, 0xf2, 0x2b, 0xe4, 0xf5, 0x4b, 0x64, 0x7d, 0x31, 0x87, 0x6a, 0x50, 0x6e, - 0x5f, 0x5a, 0x7b, 0x49, 0x8c, 0xac, 0xf3, 0xbf, 0x29, 0xe9, 0xc0, 0x1e, 0xa2, 0x6f, 0x42, 0x51, - 0x46, 0xeb, 0xe3, 0xc9, 0x71, 0xcd, 0x37, 0x53, 0xcb, 0x27, 0x0e, 0xc0, 0x25, 0xdf, 0x38, 0xf7, - 0x84, 0x85, 0xae, 0x41, 0x55, 0x00, 0x55, 0xef, 0xf7, 0x54, 0xb6, 0x05, 0x9b, 0xa2, 0xf4, 0xd0, - 0x94, 0x59, 0x83, 0xde, 0x45, 0x28, 0x4a, 0x11, 0x1c, 0xcf, 0x24, 0x55, 0x13, 0x4e, 0x93, 0xea, - 0x86, 0xe3, 0x1c, 0x7a, 0x06, 0xec, 0x1b, 0xae, 0xd7, 0x47, 0x46, 0x4e, 0x67, 0xb4, 0x6c, 0x97, - 0x8f, 0x67, 0xc1, 0xc6, 0xb6, 0xcf, 0xc5, 0x9d, 0xe7, 0x13, 0xd9, 0xf6, 0x97, 0x5e, 0xde, 
0x38, - 0x38, 0x11, 0xef, 0xfc, 0x8a, 0xec, 0x8f, 0xea, 0x26, 0x0c, 0x7a, 0x28, 0xbd, 0x55, 0xa6, 0x67, - 0xb3, 0xdc, 0x9c, 0x36, 0x1d, 0x13, 0xdc, 0x84, 0xaa, 0xd1, 0x00, 0x31, 0xc5, 0x7a, 0xb0, 0x7b, - 0x63, 0x8a, 0x75, 0x42, 0xd7, 0x04, 0xe7, 0xd0, 0x06, 0x94, 0x79, 0x26, 0x2c, 0x5e, 0x94, 0x9c, - 0xcc, 0x26, 0xbc, 0x46, 0xa2, 0xb3, 0x7c, 0x6a, 0xf2, 0x64, 0x4c, 0xe8, 0xdb, 0x50, 0xd9, 0xa0, - 0x4c, 0x45, 0x8b, 0x13, 0xd9, 0x70, 0x33, 0x41, 0x52, 0xe9, 0x90, 0x85, 0x73, 0xe8, 0x0d, 0x91, - 0x94, 0xa7, 0x9d, 0x25, 0x72, 0xa6, 0x38, 0xc5, 0xf8, 0x5c, 0x2b, 0xd3, 0x11, 0x62, 0xca, 0xaf, - 0xa7, 0x28, 0xab, 0xb8, 0xea, 0x4c, 0xb9, 0x82, 0x31, 0x65, 0xe7, 0x1e, 0x9f, 0x51, 0xe0, 0xdc, - 0xf9, 0x37, 0xf5, 0x97, 0x04, 0xeb, 0x2e, 0x73, 0xd1, 0x2b, 0xb0, 0x20, 0x64, 0x19, 0x7f, 0x6a, - 0x90, 0xb2, 0xf9, 0x03, 0xdf, 0x35, 0xa4, 0x6c, 0xfe, 0xe0, 0xf7, 0x0d, 0x38, 0xd7, 0x7e, 0xf3, - 0x83, 0x8f, 0x9b, 0xb9, 0x0f, 0x3f, 0x6e, 0xe6, 0x3e, 0xfd, 0xb8, 0x69, 0x7d, 0x7f, 0xaf, 0x69, - 0xfd, 0x7a, 0xaf, 0x69, 0xbd, 0xbf, 0xd7, 0xb4, 0x3e, 0xd8, 0x6b, 0x5a, 0xff, 0xd8, 0x6b, 0x5a, - 0xff, 0xdc, 0x6b, 0xe6, 0x3e, 0xdd, 0x6b, 0x5a, 0xef, 0x7e, 0xd2, 0xcc, 0x7d, 0xf0, 0x49, 0x33, - 0xf7, 0xe1, 0x27, 0xcd, 0xdc, 0x77, 0x1f, 0xbd, 0x77, 0x01, 0x2a, 0x1d, 0x5d, 0x49, 0xfc, 0x3d, - 0xf9, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x82, 0x5c, 0x85, 0xa1, 0xef, 0x22, 0x00, 0x00, + // 2670 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0x4d, 0x8c, 0x5b, 0x47, + 0xd9, 0xcf, 0x7e, 0xfe, 0xfb, 0xec, 0xdd, 0x6c, 0x66, 0x9d, 0xc4, 0xda, 0xa4, 0x7e, 0xdb, 0x11, + 0xb4, 0xa1, 0x49, 0xd7, 0x4d, 0x4a, 0x4b, 0x9a, 0x52, 0x4a, 0xbc, 0xdb, 0x6c, 0x93, 0x6e, 0xd3, + 0x74, 0x36, 0x4d, 0x0b, 0xa2, 0xaa, 0x5e, 0xec, 0x59, 0xef, 0x53, 0xec, 0xf7, 0x9c, 0xf7, 0xc6, + 0x4d, 0xf7, 0x86, 0xc4, 0x19, 0x51, 0x89, 0x03, 0x70, 0x41, 0x42, 0x42, 0x02, 0x81, 0x7a, 0x41, + 0x9c, 0x10, 0x82, 0x0b, 0x87, 0x72, 0x2b, 0xb7, 0xaa, 0x07, 0x43, 0xb7, 0x17, 0xb4, 0xa7, 0x4a, + 0x48, 0x1c, 0x7a, 0x42, 0xf3, 0xf7, 0xde, 0xbc, 0xb7, 0x5e, 0x52, 0x6f, 0x83, 0x4a, 0x2e, 0xf6, + 0xcc, 0x37, 0xdf, 0x7c, 0x33, 0xdf, 0xcf, 0x7c, 0x7f, 0x36, 0x9c, 0x1c, 0xdd, 0xee, 0xb7, 0x07, + 0x41, 0x7f, 0x14, 0x06, 0x2c, 0x88, 0x07, 0x2b, 0xe2, 0x13, 0x55, 0xf4, 0x7c, 0xa9, 0xd1, 0x0f, + 0xfa, 0x81, 0xc4, 0xe1, 0x23, 0xb9, 0xbe, 0xe4, 0xf4, 0x83, 0xa0, 0x3f, 0xa0, 0x6d, 0x31, 0xbb, + 0x35, 0xde, 0x6a, 0x33, 0x6f, 0x48, 0x23, 0xe6, 0x0e, 0x47, 0x0a, 0x61, 0x59, 0x51, 0xbf, 0x33, + 0x18, 0x06, 0x3d, 0x3a, 0x68, 0x47, 0xcc, 0x65, 0x91, 0xfc, 0x54, 0x18, 0x8b, 0x1c, 0x63, 0x34, + 0x8e, 0xb6, 0xc5, 0x87, 0x04, 0xe2, 0xdf, 0x5b, 0x70, 0x6c, 0xc3, 0xbd, 0x45, 0x07, 0x37, 0x82, + 0x9b, 0xee, 0x60, 0x4c, 0x23, 0x42, 0xa3, 0x51, 0xe0, 0x47, 0x14, 0xad, 0x42, 0x69, 0xc0, 0x17, + 0xa2, 0xa6, 0xb5, 0x5c, 0x38, 0x5d, 0x3b, 0x7f, 0x66, 0x25, 0xbe, 0xf2, 0xd4, 0x0d, 0x12, 0x1a, + 0xbd, 0xe0, 0xb3, 0x70, 0x87, 0xa8, 0xad, 0x4b, 0x37, 0xa1, 0x66, 0x80, 0xd1, 0x02, 0x14, 0x6e, + 0xd3, 0x9d, 0xa6, 0xb5, 0x6c, 0x9d, 0xae, 0x12, 0x3e, 0x44, 0xe7, 0xa0, 0xf8, 0x36, 0x27, 0xd3, + 0xcc, 0x2f, 0x5b, 0xa7, 0x6b, 0xe7, 0x4f, 0x26, 0x87, 0xbc, 0xe6, 0x7b, 0x77, 0xc6, 0x54, 0xec, + 0x56, 0x07, 0x49, 0xcc, 0x8b, 0xf9, 0x0b, 0x16, 0x3e, 0x03, 0x47, 0xf7, 0xad, 0xa3, 0xe3, 0x50, + 0x12, 0x18, 0xf2, 0xc6, 0x55, 0xa2, 0x66, 0xb8, 0x01, 0x68, 0x93, 0x85, 0xd4, 0x1d, 0x12, 0x97, + 0xf1, 0xfb, 0xde, 0x19, 0xd3, 0x88, 0xe1, 0x97, 0x61, 0x31, 0x05, 0x55, 0x6c, 0x3f, 0x0d, 0xb5, + 0x28, 0x01, 0x2b, 0xde, 0x1b, 0xc9, 0xb5, 0x92, 0x3d, 0xc4, 0x44, 0xc4, 
0x3f, 0xb7, 0x00, 0x92, + 0x35, 0xd4, 0x02, 0x90, 0xab, 0x2f, 0xba, 0xd1, 0xb6, 0x60, 0xd8, 0x26, 0x06, 0x04, 0x9d, 0x85, + 0xa3, 0xc9, 0xec, 0x5a, 0xb0, 0xb9, 0xed, 0x86, 0x3d, 0x21, 0x03, 0x9b, 0xec, 0x5f, 0x40, 0x08, + 0xec, 0xd0, 0x65, 0xb4, 0x59, 0x58, 0xb6, 0x4e, 0x17, 0x88, 0x18, 0x73, 0x6e, 0x19, 0xf5, 0x5d, + 0x9f, 0x35, 0x6d, 0x21, 0x4e, 0x35, 0xe3, 0x70, 0xae, 0x5f, 0x1a, 0x35, 0x8b, 0xcb, 0xd6, 0xe9, + 0x39, 0xa2, 0x66, 0xf8, 0xdf, 0x05, 0xa8, 0xbf, 0x3a, 0xa6, 0xe1, 0x8e, 0x12, 0x00, 0x6a, 0x41, + 0x25, 0xa2, 0x03, 0xda, 0x65, 0x41, 0x28, 0x35, 0xd2, 0xc9, 0x37, 0x2d, 0x12, 0xc3, 0x50, 0x03, + 0x8a, 0x03, 0x6f, 0xe8, 0x31, 0x71, 0xad, 0x39, 0x22, 0x27, 0xe8, 0x22, 0x14, 0x23, 0xe6, 0x86, + 0x4c, 0xdc, 0xa5, 0x76, 0x7e, 0x69, 0x45, 0x1a, 0xe6, 0x8a, 0x36, 0xcc, 0x95, 0x1b, 0xda, 0x30, + 0x3b, 0x95, 0xf7, 0x27, 0x4e, 0xee, 0xdd, 0xbf, 0x3b, 0x16, 0x91, 0x5b, 0xd0, 0xd3, 0x50, 0xa0, + 0x7e, 0x4f, 0xdc, 0xf7, 0xf3, 0xee, 0xe4, 0x1b, 0xd0, 0x39, 0xa8, 0xf6, 0xbc, 0x90, 0x76, 0x99, + 0x17, 0xf8, 0x82, 0xab, 0xf9, 0xf3, 0x8b, 0x89, 0x46, 0xd6, 0xf4, 0x12, 0x49, 0xb0, 0xd0, 0x59, + 0x28, 0x45, 0x5c, 0x74, 0x51, 0xb3, 0xcc, 0x6d, 0xa1, 0xd3, 0xd8, 0x9b, 0x38, 0x0b, 0x12, 0x72, + 0x36, 0x18, 0x7a, 0x8c, 0x0e, 0x47, 0x6c, 0x87, 0x28, 0x1c, 0xf4, 0x18, 0x94, 0x7b, 0x74, 0x40, + 0xb9, 0xc2, 0x2b, 0x42, 0xe1, 0x0b, 0x06, 0x79, 0xb1, 0x40, 0x34, 0x02, 0x7a, 0x13, 0xec, 0xd1, + 0xc0, 0xf5, 0x9b, 0x55, 0xc1, 0xc5, 0x7c, 0x82, 0x78, 0x7d, 0xe0, 0xfa, 0x9d, 0x67, 0x3e, 0x9a, + 0x38, 0x4f, 0xf5, 0x3d, 0xb6, 0x3d, 0xbe, 0xb5, 0xd2, 0x0d, 0x86, 0xed, 0x7e, 0xe8, 0x6e, 0xb9, + 0xbe, 0xdb, 0x1e, 0x04, 0xb7, 0xbd, 0xf6, 0xdb, 0x4f, 0xb6, 0xf9, 0x1b, 0xbc, 0x33, 0xa6, 0xa1, + 0x47, 0xc3, 0x36, 0x27, 0xb3, 0x22, 0x54, 0xc2, 0xb7, 0x12, 0x41, 0x16, 0x5d, 0xe5, 0xf6, 0x17, + 0x84, 0x74, 0x75, 0x7b, 0xec, 0xdf, 0x8e, 0x9a, 0x20, 0x4e, 0x39, 0x91, 0x9c, 0x22, 0xe0, 0x84, + 0x6e, 0xad, 0x87, 0xc1, 0x78, 0xd4, 0x39, 0xb2, 0x37, 0x71, 0x4c, 0x7c, 0x62, 0x4e, 0xae, 0xda, + 0x95, 0xd2, 0x42, 0x19, 0xbf, 0x57, 0x00, 0xb4, 0xe9, 0x0e, 0x47, 0x03, 0x3a, 0x93, 0xfa, 0x63, + 0x45, 0xe7, 0x0f, 0xad, 0xe8, 0xc2, 0xac, 0x8a, 0x4e, 0xb4, 0x66, 0xcf, 0xa6, 0xb5, 0xe2, 0xe7, + 0xd5, 0x5a, 0xe9, 0xff, 0x5e, 0x6b, 0xb8, 0x09, 0x36, 0xa7, 0xcc, 0x9d, 0x65, 0xe8, 0xde, 0x15, + 0xba, 0xa9, 0x13, 0x3e, 0xc4, 0x1b, 0x50, 0x92, 0x7c, 0xa1, 0xa5, 0xac, 0xf2, 0xd2, 0xef, 0x36, + 0x51, 0x5c, 0x41, 0xab, 0x64, 0x21, 0x51, 0x49, 0x41, 0x08, 0x1b, 0xff, 0xd1, 0x82, 0x39, 0x65, + 0x11, 0xca, 0xf7, 0xdd, 0x82, 0xb2, 0xf4, 0x3d, 0xda, 0xef, 0x9d, 0xc8, 0xfa, 0xbd, 0x4b, 0x3d, + 0x77, 0xc4, 0x68, 0xd8, 0x69, 0xbf, 0x3f, 0x71, 0xac, 0x8f, 0x26, 0xce, 0xa3, 0x07, 0x09, 0x4d, + 0xc7, 0x1a, 0xed, 0x2f, 0x35, 0x61, 0x74, 0x46, 0xdc, 0x8e, 0x45, 0xca, 0xac, 0x8e, 0xac, 0xc8, + 0x10, 0x75, 0xc5, 0xef, 0xd3, 0x88, 0x53, 0xb6, 0xb9, 0x45, 0x10, 0x89, 0xc3, 0xd9, 0xbc, 0xeb, + 0x86, 0xbe, 0xe7, 0xf7, 0xa3, 0x66, 0x41, 0xf8, 0xf4, 0x78, 0x8e, 0x7f, 0x6a, 0xc1, 0x62, 0xca, + 0xac, 0x15, 0x13, 0x17, 0xa0, 0x14, 0x71, 0x4d, 0x69, 0x1e, 0x0c, 0xa3, 0xd8, 0x14, 0xf0, 0xce, + 0xbc, 0xba, 0x7c, 0x49, 0xce, 0x89, 0xc2, 0xbf, 0x7f, 0x57, 0xfb, 0x8b, 0x05, 0x75, 0x11, 0x98, + 0xf4, 0x5b, 0x43, 0x60, 0xfb, 0xee, 0x90, 0x2a, 0x55, 0x89, 0xb1, 0x11, 0xad, 0xf8, 0x71, 0x15, + 0x1d, 0xad, 0x66, 0x75, 0xb0, 0xd6, 0xa1, 0x1d, 0xac, 0x95, 0xbc, 0xbb, 0x06, 0x14, 0xb9, 0x79, + 0xef, 0x08, 0xe7, 0x5a, 0x25, 0x72, 0x82, 0x1f, 0x85, 0x39, 0xc5, 0x85, 0x12, 0xed, 0x41, 0x01, + 0x76, 0x08, 0x25, 0xa9, 0x09, 0xf4, 0x15, 0xa8, 0xc6, 0x89, 0x89, 0xe0, 0xb6, 0xd0, 0x29, 0xed, + 
0x4d, 0x9c, 0x3c, 0x8b, 0x48, 0xb2, 0x80, 0x1c, 0x33, 0xe8, 0x5b, 0x9d, 0xea, 0xde, 0xc4, 0x91, + 0x00, 0x15, 0xe2, 0xd1, 0x29, 0xb0, 0xb7, 0x79, 0xdc, 0xe4, 0x22, 0xb0, 0x3b, 0x95, 0xbd, 0x89, + 0x23, 0xe6, 0x44, 0x7c, 0xe2, 0x75, 0xa8, 0x6f, 0xd0, 0xbe, 0xdb, 0xdd, 0x51, 0x87, 0x36, 0x34, + 0x39, 0x7e, 0xa0, 0xa5, 0x69, 0x3c, 0x0c, 0xf5, 0xf8, 0xc4, 0xb7, 0x86, 0x91, 0x7a, 0x0d, 0xb5, + 0x18, 0xf6, 0x72, 0x84, 0x7f, 0x66, 0x81, 0xb2, 0x01, 0x84, 0x8d, 0x6c, 0x87, 0xfb, 0x42, 0xd8, + 0x9b, 0x38, 0x0a, 0xa2, 0x93, 0x19, 0xf4, 0x2c, 0x94, 0x23, 0x71, 0x22, 0x27, 0x96, 0x35, 0x2d, + 0xb1, 0xd0, 0x39, 0xc2, 0x4d, 0x64, 0x6f, 0xe2, 0x68, 0x44, 0xa2, 0x07, 0x68, 0x25, 0x95, 0x10, + 0x48, 0xc6, 0xe6, 0xf7, 0x26, 0x8e, 0x01, 0x35, 0x13, 0x04, 0xfc, 0x99, 0x05, 0xb5, 0x1b, 0xae, + 0x17, 0x9b, 0x50, 0x53, 0xab, 0x28, 0xf1, 0xd5, 0x12, 0xc0, 0x2d, 0xb1, 0x47, 0x07, 0xee, 0xce, + 0xe5, 0x20, 0x14, 0x74, 0xe7, 0x48, 0x3c, 0x4f, 0x62, 0xb8, 0x3d, 0x35, 0x86, 0x17, 0x67, 0x77, + 0xed, 0xff, 0x5b, 0x47, 0x7a, 0xd5, 0xae, 0xe4, 0x17, 0x0a, 0xf8, 0x3d, 0x0b, 0xea, 0x92, 0x79, + 0x65, 0x79, 0xdf, 0x83, 0x92, 0x94, 0x8d, 0x60, 0xff, 0xbf, 0x38, 0xa6, 0x33, 0xb3, 0x38, 0x25, + 0x45, 0x13, 0x3d, 0x0f, 0xf3, 0xbd, 0x30, 0x18, 0x8d, 0x68, 0x6f, 0x53, 0xb9, 0xbf, 0x7c, 0xd6, + 0xfd, 0xad, 0x99, 0xeb, 0x24, 0x83, 0x8e, 0xff, 0x6a, 0xc1, 0x9c, 0x72, 0x26, 0x4a, 0x5d, 0xb1, + 0x88, 0xad, 0x43, 0x47, 0xcf, 0xfc, 0xac, 0xd1, 0xf3, 0x38, 0x94, 0xfa, 0x3c, 0xbe, 0x68, 0x87, + 0xa4, 0x66, 0xb3, 0x45, 0x55, 0x7c, 0x15, 0xe6, 0x35, 0x2b, 0x07, 0x78, 0xd4, 0xa5, 0xac, 0x47, + 0xbd, 0xd2, 0xa3, 0x3e, 0xf3, 0xb6, 0xbc, 0xd8, 0x47, 0x2a, 0x7c, 0xfc, 0x23, 0x0b, 0x16, 0xb2, + 0x28, 0x68, 0x2d, 0x53, 0x58, 0x3c, 0x72, 0x30, 0x39, 0xb3, 0xa6, 0xd0, 0xa4, 0x55, 0x65, 0xf1, + 0xd4, 0xbd, 0x2a, 0x8b, 0x86, 0xe9, 0x64, 0xaa, 0xca, 0x2b, 0xe0, 0x9f, 0x58, 0x30, 0x97, 0xd2, + 0x25, 0xba, 0x00, 0xf6, 0x56, 0x18, 0x0c, 0x67, 0x52, 0x94, 0xd8, 0x81, 0xbe, 0x0e, 0x79, 0x16, + 0xcc, 0xa4, 0xa6, 0x3c, 0x0b, 0xb8, 0x96, 0x14, 0xfb, 0x05, 0x99, 0xb7, 0xcb, 0x19, 0x7e, 0x0a, + 0xaa, 0x82, 0xa1, 0xeb, 0xae, 0x17, 0x4e, 0x0d, 0x18, 0xd3, 0x19, 0x7a, 0x16, 0x8e, 0x48, 0x67, + 0x38, 0x7d, 0x73, 0x7d, 0xda, 0xe6, 0xba, 0xde, 0x7c, 0x12, 0x8a, 0x22, 0xe9, 0xe0, 0x5b, 0x7a, + 0x2e, 0x73, 0xf5, 0x16, 0x3e, 0xc6, 0xc7, 0x60, 0x91, 0xbf, 0x41, 0x1a, 0x46, 0xab, 0xc1, 0xd8, + 0x67, 0xba, 0x6e, 0x3a, 0x0b, 0x8d, 0x34, 0x58, 0x59, 0x49, 0x03, 0x8a, 0x5d, 0x0e, 0x10, 0x34, + 0xe6, 0x88, 0x9c, 0xe0, 0x5f, 0x5a, 0x80, 0xd6, 0x29, 0x13, 0xa7, 0x5c, 0x59, 0x8b, 0x9f, 0xc7, + 0x12, 0x54, 0x86, 0x2e, 0xeb, 0x6e, 0xd3, 0x30, 0xd2, 0xf9, 0x8b, 0x9e, 0x7f, 0x19, 0x89, 0x27, + 0x3e, 0x07, 0x8b, 0xa9, 0x5b, 0x2a, 0x9e, 0x96, 0xa0, 0xd2, 0x55, 0x30, 0x15, 0xf2, 0xe2, 0x39, + 0xfe, 0x5d, 0x1e, 0x2a, 0x3a, 0xad, 0x43, 0xe7, 0xa0, 0xb6, 0xe5, 0xf9, 0x7d, 0x1a, 0x8e, 0x42, + 0x4f, 0x89, 0xc0, 0x96, 0x69, 0x9e, 0x01, 0x26, 0xe6, 0x04, 0x3d, 0x0e, 0xe5, 0x71, 0x44, 0xc3, + 0xb7, 0x3c, 0xf9, 0xd2, 0xab, 0x9d, 0xc6, 0xee, 0xc4, 0x29, 0xbd, 0x16, 0xd1, 0xf0, 0xca, 0x1a, + 0x0f, 0x3e, 0x63, 0x31, 0x22, 0xf2, 0xbb, 0x87, 0x5e, 0x52, 0x66, 0x2a, 0x12, 0xb8, 0xce, 0x37, + 0xf8, 0xf5, 0x33, 0xae, 0x6e, 0x14, 0x06, 0x43, 0xca, 0xb6, 0xe9, 0x38, 0x6a, 0x77, 0x83, 0xe1, + 0x30, 0xf0, 0xdb, 0xa2, 0x13, 0x20, 0x98, 0xe6, 0x11, 0x94, 0x6f, 0x57, 0x96, 0x7b, 0x03, 0xca, + 0x6c, 0x3b, 0x0c, 0xc6, 0xfd, 0x6d, 0x11, 0x18, 0x0a, 0x9d, 0x8b, 0xb3, 0xd3, 0xd3, 0x14, 0x88, + 0x1e, 0xa0, 0x87, 0xb9, 0xb4, 0x68, 0xf7, 0x76, 0x34, 0x1e, 0xca, 0xda, 0xb3, 0x53, 0xdc, 0x9b, + 0x38, 0xd6, 0xe3, 0x24, 
0x06, 0xe3, 0x4b, 0x30, 0x97, 0x4a, 0x85, 0xd1, 0x13, 0x60, 0x87, 0x74, + 0x4b, 0xbb, 0x02, 0xb4, 0x3f, 0x63, 0x96, 0xd1, 0x9f, 0xe3, 0x10, 0xf1, 0x89, 0x7f, 0x98, 0x07, + 0xc7, 0xa8, 0xfa, 0x2f, 0x07, 0xe1, 0xcb, 0x94, 0x85, 0x5e, 0xf7, 0x9a, 0x3b, 0xa4, 0xda, 0xbc, + 0x1c, 0xa8, 0x0d, 0x05, 0xf0, 0x2d, 0xe3, 0x15, 0xc1, 0x30, 0xc6, 0x43, 0x0f, 0x01, 0x88, 0x67, + 0x27, 0xd7, 0xe5, 0x83, 0xaa, 0x0a, 0x88, 0x58, 0x5e, 0x4d, 0x09, 0xbb, 0x3d, 0xa3, 0x70, 0x94, + 0x90, 0xaf, 0x64, 0x85, 0x3c, 0x33, 0x9d, 0x58, 0xb2, 0xe6, 0x73, 0x29, 0xa6, 0x9f, 0x0b, 0xfe, + 0x9b, 0x05, 0xad, 0x0d, 0x7d, 0xf3, 0x43, 0x8a, 0x43, 0xf3, 0x9b, 0xbf, 0x4f, 0xfc, 0x16, 0xbe, + 0x18, 0xbf, 0xb8, 0x05, 0xb0, 0xe1, 0xf9, 0xf4, 0xb2, 0x37, 0x60, 0x34, 0x9c, 0x52, 0x08, 0xfd, + 0xb8, 0x90, 0x78, 0x15, 0x42, 0xb7, 0x34, 0x9f, 0xab, 0x86, 0x2b, 0xbf, 0x1f, 0x6c, 0xe4, 0xef, + 0xa3, 0xda, 0x0a, 0x19, 0x2f, 0xe7, 0x43, 0x79, 0x4b, 0xb0, 0x27, 0xa3, 0x72, 0xaa, 0xc7, 0x94, + 0xf0, 0xde, 0xf9, 0x96, 0x3a, 0xfc, 0xe9, 0x7b, 0x24, 0x55, 0xa2, 0xf3, 0xd7, 0x8e, 0x76, 0x7c, + 0xe6, 0xbe, 0x63, 0xec, 0x27, 0xfa, 0x10, 0xe4, 0xaa, 0xbc, 0xad, 0x38, 0x35, 0x6f, 0x7b, 0x4e, + 0x1d, 0xf3, 0x45, 0x72, 0x37, 0xfc, 0x5c, 0xe2, 0x44, 0x85, 0x52, 0x94, 0x13, 0x7d, 0xe4, 0x5e, + 0x4f, 0x5c, 0x3d, 0xec, 0x3f, 0x59, 0xb0, 0xb0, 0x4e, 0x59, 0x3a, 0x8f, 0x7a, 0x80, 0x54, 0x8a, + 0x5f, 0x84, 0xa3, 0xc6, 0xfd, 0x15, 0xf7, 0x4f, 0x66, 0x92, 0xa7, 0x63, 0x09, 0xff, 0x57, 0xfc, + 0x1e, 0x7d, 0x47, 0xd5, 0xa4, 0xe9, 0xbc, 0xe9, 0x3a, 0xd4, 0x8c, 0x45, 0x74, 0x29, 0x93, 0x31, + 0x2d, 0x66, 0x5a, 0xb1, 0x3c, 0xea, 0x77, 0x1a, 0x8a, 0x27, 0x59, 0x79, 0xaa, 0x7c, 0x38, 0xce, + 0x2e, 0x36, 0x01, 0x09, 0x75, 0x09, 0xb2, 0x66, 0x7c, 0x13, 0xd0, 0x97, 0xe2, 0xd4, 0x29, 0x9e, + 0xa3, 0x87, 0xc1, 0x0e, 0x83, 0xbb, 0x3a, 0x15, 0x9e, 0x4b, 0x8e, 0x24, 0xc1, 0x5d, 0x22, 0x96, + 0xf0, 0xb3, 0x50, 0x20, 0xc1, 0x5d, 0xd4, 0x02, 0x08, 0x5d, 0xbf, 0x4f, 0x6f, 0xc6, 0x45, 0x58, + 0x9d, 0x18, 0x90, 0x03, 0x72, 0x8f, 0x55, 0x38, 0x6a, 0xde, 0x48, 0xaa, 0x7b, 0x05, 0xca, 0xaf, + 0x8e, 0x4d, 0x71, 0x35, 0x32, 0xe2, 0x92, 0xb5, 0xbe, 0x46, 0xe2, 0x36, 0x03, 0x09, 0x1c, 0x9d, + 0x82, 0x2a, 0x73, 0x6f, 0x0d, 0xe8, 0xb5, 0xc4, 0xcd, 0x25, 0x00, 0xbe, 0xca, 0xeb, 0xc7, 0x9b, + 0x46, 0x12, 0x95, 0x00, 0xd0, 0x63, 0xb0, 0x90, 0xdc, 0xf9, 0x7a, 0x48, 0xb7, 0xbc, 0x77, 0x84, + 0x86, 0xeb, 0x64, 0x1f, 0x1c, 0x9d, 0x86, 0x23, 0x09, 0x6c, 0x53, 0x24, 0x2b, 0xb6, 0x40, 0xcd, + 0x82, 0xb9, 0x6c, 0x04, 0xbb, 0x2f, 0xdc, 0x19, 0xbb, 0x03, 0xf1, 0xf8, 0xea, 0xc4, 0x80, 0xe0, + 0x3f, 0x5b, 0x70, 0x54, 0xaa, 0x9a, 0xb9, 0xec, 0x81, 0xb4, 0xfa, 0x5f, 0x59, 0x80, 0x4c, 0x0e, + 0x94, 0x69, 0x7d, 0xd5, 0xec, 0x25, 0xf1, 0x6c, 0xa8, 0x26, 0xca, 0x62, 0x09, 0x4a, 0xda, 0x41, + 0x18, 0x4a, 0x5d, 0xd9, 0x33, 0x13, 0xcd, 0x6f, 0x59, 0x77, 0x4b, 0x08, 0x51, 0xdf, 0xc8, 0x81, + 0xe2, 0xad, 0x1d, 0x46, 0x23, 0x55, 0x35, 0x8b, 0x76, 0x81, 0x00, 0x10, 0xf9, 0xc5, 0xcf, 0xa2, + 0x3e, 0x13, 0x56, 0x63, 0x27, 0x67, 0x29, 0x10, 0xd1, 0x03, 0xfc, 0xdb, 0x3c, 0xcc, 0xdd, 0x0c, + 0x06, 0xe3, 0x24, 0x30, 0x3e, 0x48, 0x01, 0x23, 0x55, 0xca, 0x17, 0x75, 0x29, 0x8f, 0xc0, 0x8e, + 0x18, 0x1d, 0x09, 0xcb, 0x2a, 0x10, 0x31, 0x46, 0x18, 0xea, 0xcc, 0x0d, 0xfb, 0x94, 0xc9, 0x02, + 0xa9, 0x59, 0x12, 0x99, 0x6b, 0x0a, 0x86, 0x96, 0xa1, 0xe6, 0xf6, 0xfb, 0x21, 0xed, 0xbb, 0x8c, + 0x76, 0x76, 0x9a, 0x65, 0x71, 0x98, 0x09, 0xc2, 0x6f, 0xc0, 0xbc, 0x16, 0x96, 0x52, 0xe9, 0x13, + 0x50, 0x7e, 0x5b, 0x40, 0xa6, 0xb4, 0xd6, 0x24, 0xaa, 0x72, 0x63, 0x1a, 0x2d, 0xfd, 0x13, 0x82, + 0xbe, 0x33, 0xbe, 0x0a, 0x25, 0x89, 0x8e, 0x4e, 
0x99, 0x65, 0x8e, 0xcc, 0xf4, 0xf8, 0x5c, 0xd5, + 0x2c, 0x18, 0x4a, 0x92, 0x90, 0x52, 0xbc, 0xb0, 0x0d, 0x09, 0x21, 0xea, 0x1b, 0xff, 0xcb, 0x82, + 0x63, 0x6b, 0x94, 0xd1, 0x2e, 0xa3, 0xbd, 0xcb, 0x1e, 0x1d, 0xf4, 0xbe, 0xd4, 0x0a, 0x3c, 0xee, + 0xa3, 0x15, 0x8c, 0x3e, 0x1a, 0xf7, 0x3b, 0x03, 0xcf, 0xa7, 0x1b, 0x46, 0x23, 0x26, 0x01, 0x70, + 0x0f, 0xb1, 0xc5, 0x2f, 0x2e, 0x97, 0xe5, 0x6f, 0x36, 0x06, 0x24, 0xd6, 0x70, 0x29, 0xd1, 0x30, + 0xfe, 0x81, 0x05, 0xc7, 0xb3, 0x5c, 0x2b, 0x25, 0xb5, 0xa1, 0x24, 0x36, 0x4f, 0x69, 0xe1, 0xa6, + 0x76, 0x10, 0x85, 0x86, 0x2e, 0xa4, 0xce, 0x17, 0xbf, 0xf5, 0x74, 0x9a, 0x7b, 0x13, 0xa7, 0x91, + 0x40, 0x8d, 0x2e, 0x81, 0x81, 0x8b, 0xff, 0xc0, 0x6b, 0x69, 0x93, 0xa6, 0xd0, 0x37, 0xb7, 0x2f, + 0xe5, 0x7b, 0xe5, 0x04, 0x7d, 0x0d, 0x6c, 0xb6, 0x33, 0x52, 0x2e, 0xb7, 0x73, 0xec, 0xb3, 0x89, + 0x73, 0x34, 0xb5, 0xed, 0xc6, 0xce, 0x88, 0x12, 0x81, 0xc2, 0xcd, 0xb2, 0xeb, 0x86, 0x3d, 0xcf, + 0x77, 0x07, 0x1e, 0x93, 0x62, 0xb4, 0x89, 0x09, 0x42, 0x4d, 0x28, 0x8f, 0xdc, 0x30, 0xd2, 0x79, + 0x53, 0x95, 0xe8, 0xa9, 0x68, 0x73, 0xdc, 0xa6, 0xac, 0xbb, 0x2d, 0xdd, 0xac, 0x6a, 0x73, 0x08, + 0x48, 0xaa, 0xcd, 0x21, 0x20, 0xf8, 0x17, 0x86, 0xe1, 0xc8, 0x37, 0x71, 0x48, 0xc3, 0xb1, 0x0e, + 0x6d, 0x38, 0xd6, 0x3d, 0x0c, 0x07, 0x7f, 0x27, 0xd1, 0xb2, 0xbe, 0xa2, 0xd2, 0xf2, 0xf3, 0x30, + 0xdf, 0x4b, 0xad, 0x1c, 0xac, 0x6d, 0xd9, 0xc2, 0xcd, 0xa0, 0xe3, 0xf5, 0x44, 0x75, 0x02, 0x72, + 0x80, 0xea, 0x32, 0xfa, 0xc8, 0xef, 0xd3, 0xc7, 0x63, 0x8f, 0x40, 0x35, 0xfe, 0x01, 0x0e, 0xd5, + 0xa0, 0x7c, 0xf9, 0x15, 0xf2, 0xfa, 0x25, 0xb2, 0xb6, 0x90, 0x43, 0x75, 0xa8, 0x74, 0x2e, 0xad, + 0xbe, 0x24, 0x66, 0xd6, 0xf9, 0xdf, 0x94, 0x74, 0x68, 0x0f, 0xd1, 0x37, 0xa1, 0x28, 0xe3, 0xf5, + 0xf1, 0xe4, 0xba, 0xe6, 0x6f, 0x53, 0x4b, 0x27, 0xf6, 0xc1, 0x25, 0xdf, 0x38, 0xf7, 0x84, 0x85, + 0xae, 0x41, 0x4d, 0x00, 0x55, 0xf7, 0xf7, 0x54, 0xb6, 0x09, 0x9b, 0xa2, 0xf4, 0xd0, 0x01, 0xab, + 0x06, 0xbd, 0x8b, 0x50, 0x94, 0x22, 0x38, 0x9e, 0x49, 0xab, 0xa6, 0xdc, 0x26, 0xd5, 0x0f, 0xc7, + 0x39, 0xf4, 0x0c, 0xd8, 0x37, 0x5c, 0x6f, 0x80, 0x8c, 0xac, 0xce, 0x68, 0xda, 0x2e, 0x1d, 0xcf, + 0x82, 0x8d, 0x63, 0x9f, 0x8b, 0x7b, 0xcf, 0x27, 0xb2, 0x0d, 0x30, 0xbd, 0xbd, 0xb9, 0x7f, 0x21, + 0x3e, 0xf9, 0x15, 0xd9, 0x21, 0xd5, 0x6d, 0x18, 0xf4, 0x50, 0xfa, 0xa8, 0x4c, 0xd7, 0x66, 0xa9, + 0x75, 0xd0, 0x72, 0x4c, 0x70, 0x03, 0x6a, 0x46, 0x0b, 0xc4, 0x14, 0xeb, 0xfe, 0xfe, 0x8d, 0x29, + 0xd6, 0x29, 0x7d, 0x13, 0x9c, 0x43, 0xeb, 0x50, 0xe1, 0xb9, 0xb0, 0xf8, 0xa9, 0xe4, 0x64, 0x36, + 0xe5, 0x35, 0x52, 0x9d, 0xa5, 0x53, 0xd3, 0x17, 0x63, 0x42, 0xdf, 0x86, 0xea, 0x3a, 0x65, 0x2a, + 0x5e, 0x9c, 0xc8, 0x06, 0x9c, 0x29, 0x92, 0x4a, 0x07, 0x2d, 0x9c, 0x43, 0x6f, 0x88, 0xb4, 0x3c, + 0xed, 0x2e, 0x91, 0x73, 0x80, 0x5b, 0x8c, 0xef, 0xb5, 0x7c, 0x30, 0x42, 0x4c, 0xf9, 0xf5, 0x14, + 0x65, 0x15, 0x59, 0x9d, 0x03, 0x9e, 0x60, 0x4c, 0xd9, 0xb9, 0xc7, 0x1f, 0x29, 0x70, 0xee, 0xfc, + 0x9b, 0xfa, 0xbf, 0x04, 0x6b, 0x2e, 0x73, 0xd1, 0x2b, 0x30, 0x2f, 0x64, 0x19, 0xff, 0xd9, 0x20, + 0x65, 0xf3, 0xfb, 0xfe, 0xd9, 0x90, 0xb2, 0xf9, 0xfd, 0xff, 0x70, 0xc0, 0xb9, 0xce, 0x9b, 0x1f, + 0x7c, 0xdc, 0xca, 0x7d, 0xf8, 0x71, 0x2b, 0xf7, 0xe9, 0xc7, 0x2d, 0xeb, 0xfb, 0xbb, 0x2d, 0xeb, + 0xd7, 0xbb, 0x2d, 0xeb, 0xfd, 0xdd, 0x96, 0xf5, 0xc1, 0x6e, 0xcb, 0xfa, 0xc7, 0x6e, 0xcb, 0xfa, + 0xe7, 0x6e, 0x2b, 0xf7, 0xe9, 0x6e, 0xcb, 0x7a, 0xf7, 0x93, 0x56, 0xee, 0x83, 0x4f, 0x5a, 0xb9, + 0x0f, 0x3f, 0x69, 0xe5, 0xbe, 0xfb, 0xe8, 0xbd, 0x4b, 0x50, 0xe9, 0xe8, 0x4a, 0xe2, 0xeb, 0xc9, + 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x04, 0x88, 0x1d, 0xe6, 0xf1, 0x22, 
0x00, 0x00, } func (x Direction) String() string { @@ -4964,9 +4964,14 @@ func (this *DetectedField) Equal(that interface{}) bool { if this.Cardinality != that1.Cardinality { return false } - if this.Parser != that1.Parser { + if len(this.Parsers) != len(that1.Parsers) { return false } + for i := range this.Parsers { + if this.Parsers[i] != that1.Parsers[i] { + return false + } + } if !bytes.Equal(this.Sketch, that1.Sketch) { return false } @@ -5736,7 +5741,7 @@ func (this *DetectedField) GoString() string { s = append(s, "Label: "+fmt.Sprintf("%#v", this.Label)+",\n") s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") - s = append(s, "Parser: "+fmt.Sprintf("%#v", this.Parser)+",\n") + s = append(s, "Parsers: "+fmt.Sprintf("%#v", this.Parsers)+",\n") s = append(s, "Sketch: "+fmt.Sprintf("%#v", this.Sketch)+",\n") s = append(s, "}") return strings.Join(s, "") @@ -8691,12 +8696,14 @@ func (m *DetectedField) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - if len(m.Parser) > 0 { - i -= len(m.Parser) - copy(dAtA[i:], m.Parser) - i = encodeVarintLogproto(dAtA, i, uint64(len(m.Parser))) - i-- - dAtA[i] = 0x22 + if len(m.Parsers) > 0 { + for iNdEx := len(m.Parsers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Parsers[iNdEx]) + copy(dAtA[i:], m.Parsers[iNdEx]) + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Parsers[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } if m.Cardinality != 0 { i = encodeVarintLogproto(dAtA, i, uint64(m.Cardinality)) @@ -9868,9 +9875,11 @@ func (m *DetectedField) Size() (n int) { if m.Cardinality != 0 { n += 1 + sovLogproto(uint64(m.Cardinality)) } - l = len(m.Parser) - if l > 0 { - n += 1 + l + sovLogproto(uint64(l)) + if len(m.Parsers) > 0 { + for _, s := range m.Parsers { + l = len(s) + n += 1 + l + sovLogproto(uint64(l)) + } } l = len(m.Sketch) if l > 0 { @@ -10622,7 +10631,7 @@ func (this *DetectedField) String() string { `Label:` + fmt.Sprintf("%v", this.Label) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, - `Parser:` + fmt.Sprintf("%v", this.Parser) + `,`, + `Parsers:` + fmt.Sprintf("%v", this.Parsers) + `,`, `Sketch:` + fmt.Sprintf("%v", this.Sketch) + `,`, `}`, }, "") @@ -17495,7 +17504,7 @@ func (m *DetectedField) Unmarshal(dAtA []byte) error { } case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parser", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parsers", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17523,7 +17532,7 @@ func (m *DetectedField) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Parser = string(dAtA[iNdEx:postIndex]) + m.Parsers = append(m.Parsers, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 5: if wireType != 2 { diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto index f6f8c12a8fdec..b9c3cd987c7aa 100644 --- a/pkg/logproto/logproto.proto +++ b/pkg/logproto/logproto.proto @@ -471,7 +471,7 @@ message DetectedField { string label = 1; string type = 2 [(gogoproto.casttype) = "DetectedFieldType"]; uint64 cardinality = 3; - string parser = 4; + repeated string parsers = 4; bytes sketch = 5 [(gogoproto.jsontag) = "sketch,omitempty"]; } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 73ea98d05fef5..0223b89b1b388 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -1108,7 +1108,7 @@ func (q *SingleTenantQuerier) 
DetectedFields(ctx context.Context, req *logproto. Type: v.fieldType, Cardinality: v.Estimate(), Sketch: sketch, - Parser: v.parser, + Parsers: v.parsers, } fieldCount++ @@ -1122,10 +1122,9 @@ func (q *SingleTenantQuerier) DetectedFields(ctx context.Context, req *logproto. } type parsedFields struct { - sketch *hyperloglog.Sketch - isTypeDetected bool - fieldType logproto.DetectedFieldType - parser string + sketch *hyperloglog.Sketch + fieldType logproto.DetectedFieldType + parsers []string } func newParsedFields(parser *string) *parsedFields { @@ -1134,10 +1133,9 @@ func newParsedFields(parser *string) *parsedFields { p = *parser } return &parsedFields{ - sketch: hyperloglog.New(), - isTypeDetected: false, - fieldType: logproto.DetectedFieldString, - parser: p, + sketch: hyperloglog.New(), + fieldType: logproto.DetectedFieldString, + parsers: []string{p}, } } @@ -1155,7 +1153,6 @@ func (p *parsedFields) Marshal() ([]byte, error) { func (p *parsedFields) DetermineType(value string) { p.fieldType = determineType(value) - p.isTypeDetected = true } func determineType(value string) logproto.DetectedFieldType { @@ -1187,6 +1184,7 @@ func parseDetectedFields(ctx context.Context, limit uint32, streams logqlmodel.S fieldCount := uint32(0) for _, stream := range streams { + detectType := true level.Debug(spanlogger.FromContext(ctx)).Log( "detected_fields", "true", "msg", fmt.Sprintf("looking for detected fields in stream %d with %d lines", stream.Hash, len(stream.Entries))) @@ -1196,7 +1194,6 @@ func parseDetectedFields(ctx context.Context, limit uint32, streams logqlmodel.S for k, vals := range detected { df, ok := detectedFields[k] if !ok && fieldCount < limit { - df = newParsedFields(parser) detectedFields[k] = df fieldCount++ @@ -1206,10 +1203,16 @@ func parseDetectedFields(ctx context.Context, limit uint32, streams logqlmodel.S continue } + if !slices.Contains(df.parsers, *parser) { + df.parsers = append(df.parsers, *parser) + } + for _, v := range vals { parsedFields := detectedFields[k] - if !parsedFields.isTypeDetected { + if detectType { + // we don't want to determine the type for every line, so we assume the type in each stream will be the same, and re-detect the type for the next stream parsedFields.DetermineType(v) + detectType = false } parsedFields.Insert(v) diff --git a/pkg/storage/detected/fields.go b/pkg/storage/detected/fields.go index 6310448216653..9d6a699bc1e14 100644 --- a/pkg/storage/detected/fields.go +++ b/pkg/storage/detected/fields.go @@ -1,16 +1,18 @@ package detected import ( + "slices" + "github.com/axiomhq/hyperloglog" "github.com/grafana/loki/v3/pkg/logproto" ) type UnmarshaledDetectedField struct { - Label string - Type logproto.DetectedFieldType - Parser string - Sketch *hyperloglog.Sketch + Label string + Type logproto.DetectedFieldType + Parsers []string + Sketch *hyperloglog.Sketch } func UnmarshalDetectedField(f *logproto.DetectedField) (*UnmarshaledDetectedField, error) { @@ -21,10 +23,10 @@ func UnmarshalDetectedField(f *logproto.DetectedField) (*UnmarshaledDetectedFiel } return &UnmarshaledDetectedField{ - Label: f.Label, - Type: f.Type, - Parser: f.Parser, - Sketch: sketch, + Label: f.Label, + Type: f.Type, + Parsers: f.Parsers, + Sketch: sketch, }, nil } @@ -35,6 +37,14 @@ func (f *UnmarshaledDetectedField) Merge(df *logproto.DetectedField) error { return err } + if f.Type != df.Type { + f.Type = logproto.DetectedFieldString + } + + f.Parsers = append(f.Parsers, df.Parsers...) 
+ slices.Sort(f.Parsers) + f.Parsers = slices.Compact(f.Parsers) + return f.Sketch.Merge(sketch) } @@ -79,7 +89,7 @@ func MergeFields( Label: field.Label, Type: field.Type, Cardinality: field.Sketch.Estimate(), - Parser: field.Parser, + Parsers: field.Parsers, Sketch: nil, } result = append(result, detectedField) diff --git a/pkg/storage/detected/fields_test.go b/pkg/storage/detected/fields_test.go index 0e6ad800738ad..2c31fa1d2775e 100644 --- a/pkg/storage/detected/fields_test.go +++ b/pkg/storage/detected/fields_test.go @@ -34,7 +34,7 @@ func Test_MergeFields(t *testing.T) { Type: logproto.DetectedFieldString, Cardinality: 1, Sketch: marshalledFooSketch, - Parser: "logfmt", + Parsers: []string{"logfmt", "json"}, }, { Label: "bar", @@ -47,6 +47,19 @@ func Test_MergeFields(t *testing.T) { Type: logproto.DetectedFieldString, Cardinality: 3, Sketch: marhsalledOtherFooSketch, + Parsers: []string{"json"}, + }, + { + Label: "baz", + Type: logproto.DetectedFieldBoolean, + Cardinality: 3, + Sketch: marhsalledOtherFooSketch, + }, + { + Label: "baz", + Type: logproto.DetectedFieldFloat, + Cardinality: 3, + Sketch: marhsalledOtherFooSketch, }, } @@ -55,18 +68,24 @@ func Test_MergeFields(t *testing.T) { t.Run("merges fields", func(t *testing.T) { result, err := MergeFields(fields, limit) require.NoError(t, err) - assert.Equal(t, 2, len(result)) + assert.Equal(t, 3, len(result)) var foo *logproto.DetectedField + var baz *logproto.DetectedField for _, field := range result { if field.Label == "foo" { foo = field } + if field.Label == "baz" { + baz = field + } } assert.Equal(t, logproto.DetectedFieldString, foo.Type) assert.Equal(t, uint64(3), foo.Cardinality) - assert.Equal(t, "logfmt", foo.Parser) + assert.Equal(t, []string{"json", "logfmt"}, foo.Parsers) + + assert.Equal(t, logproto.DetectedFieldString, baz.Type) }) t.Run("returns up to limit number of fields", func(t *testing.T) { @@ -78,7 +97,7 @@ func Test_MergeFields(t *testing.T) { highLimit := uint32(4) result, err = MergeFields(fields, highLimit) require.NoError(t, err) - assert.Equal(t, 2, len(result)) + assert.Equal(t, 3, len(result)) }) t.Run("returns an error when the field cannot be unmarshalled", func(t *testing.T) { From afd9e363065ee2bb07844f8dbb30cc8dfb310c9d Mon Sep 17 00:00:00 2001 From: Dylan Guedes Date: Wed, 8 May 2024 08:55:39 -0300 Subject: [PATCH 10/47] chore: Call `shardstreams.Config` by value instead of by reference (#12915) Use `shardstreams.Config` by value instead of by reference to fix docs generation. Our `docs-generator` tool relies on the struct address/references to assume that flags are present. Using this config by value fixes it. --- docs/sources/shared/configuration.md | 16 +++++++++++++--- pkg/distributor/distributor.go | 6 +++--- pkg/distributor/limits.go | 2 +- pkg/distributor/ratestore_test.go | 6 +++--- pkg/distributor/shardstreams/config.go | 7 ++++--- pkg/ingester/instance_test.go | 2 +- pkg/ingester/limiter.go | 2 +- pkg/loki/loki.go | 2 +- pkg/validation/limits.go | 5 ++--- 9 files changed, 29 insertions(+), 19 deletions(-) diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index df40267e9ae84..9e933b87d4de7 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -3305,12 +3305,22 @@ ruler_remote_write_sigv4_config: # Deprecated: Use deletion_mode per tenant configuration instead. [allow_deletes: ] +# Define streams sharding behavior. 
shard_streams: - [enabled: ] + # Automatically shard streams to keep them under the per-stream rate limit. + # Sharding is dictated by the desired rate. + # CLI flag: -shard-streams.enabled + [enabled: | default = true] - [logging_enabled: ] + # Whether to log sharding streams behavior or not. Not recommended for + # production environments. + # CLI flag: -shard-streams.logging-enabled + [logging_enabled: | default = false] - [desired_rate: ] + # Threshold used to cut a new shard. Default (1536KB) means if a rate is above + # 1536KB/s, it will be sharded into two streams. + # CLI flag: -shard-streams.desired-rate + [desired_rate: | default = 1536KB] [blocked_queries: ] diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 268db96e897ac..87036b5e23c37 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -589,7 +589,7 @@ func (d *Distributor) shardStream(stream logproto.Stream, pushSize int, tenantID return d.divideEntriesBetweenShards(tenantID, shardCount, shardStreamsCfg, stream) } -func (d *Distributor) divideEntriesBetweenShards(tenantID string, totalShards int, shardStreamsCfg *shardstreams.Config, stream logproto.Stream) []KeyedStream { +func (d *Distributor) divideEntriesBetweenShards(tenantID string, totalShards int, shardStreamsCfg shardstreams.Config, stream logproto.Stream) []KeyedStream { derivedStreams := d.createShards(stream, totalShards, tenantID, shardStreamsCfg) for i := 0; i < len(stream.Entries); i++ { @@ -601,7 +601,7 @@ func (d *Distributor) divideEntriesBetweenShards(tenantID string, totalShards in return derivedStreams } -func (d *Distributor) createShards(stream logproto.Stream, totalShards int, tenantID string, shardStreamsCfg *shardstreams.Config) []KeyedStream { +func (d *Distributor) createShards(stream logproto.Stream, totalShards int, tenantID string, shardStreamsCfg shardstreams.Config) []KeyedStream { var ( streamLabels = labelTemplate(stream.Labels, d.logger) streamPattern = streamLabels.String() @@ -809,7 +809,7 @@ func (d *Distributor) parseStreamLabels(vContext validationContext, key string, // based on the rate stored in the rate store and will store the new evaluated number of shards. // // desiredRate is expected to be given in bytes. 
-func (d *Distributor) shardCountFor(logger log.Logger, stream *logproto.Stream, pushSize int, tenantID string, streamShardcfg *shardstreams.Config) int { +func (d *Distributor) shardCountFor(logger log.Logger, stream *logproto.Stream, pushSize int, tenantID string, streamShardcfg shardstreams.Config) int { if streamShardcfg.DesiredRate.Val() <= 0 { if streamShardcfg.LoggingEnabled { level.Error(logger).Log("msg", "invalid desired rate", "desired_rate", streamShardcfg.DesiredRate.String()) diff --git a/pkg/distributor/limits.go b/pkg/distributor/limits.go index 05734db4184f0..a207570c25d51 100644 --- a/pkg/distributor/limits.go +++ b/pkg/distributor/limits.go @@ -25,7 +25,7 @@ type Limits interface { DiscoverServiceName(userID string) []string DiscoverLogLevels(userID string) bool - ShardStreams(userID string) *shardstreams.Config + ShardStreams(userID string) shardstreams.Config IngestionRateStrategy() string IngestionRateBytes(userID string) float64 IngestionBurstSizeBytes(userID string) int diff --git a/pkg/distributor/ratestore_test.go b/pkg/distributor/ratestore_test.go index af9fa9f0adb70..5bfacf96ebd46 100644 --- a/pkg/distributor/ratestore_test.go +++ b/pkg/distributor/ratestore_test.go @@ -341,15 +341,15 @@ type fakeOverrides struct { func (c *fakeOverrides) AllByUserID() map[string]*validation.Limits { return map[string]*validation.Limits{ "ingester0": { - ShardStreams: &shardstreams.Config{ + ShardStreams: shardstreams.Config{ Enabled: c.enabled, }, }, } } -func (c *fakeOverrides) ShardStreams(_ string) *shardstreams.Config { - return &shardstreams.Config{ +func (c *fakeOverrides) ShardStreams(_ string) shardstreams.Config { + return shardstreams.Config{ Enabled: c.enabled, } } diff --git a/pkg/distributor/shardstreams/config.go b/pkg/distributor/shardstreams/config.go index 1bf1f89f961c6..5c39fcc28d6c5 100644 --- a/pkg/distributor/shardstreams/config.go +++ b/pkg/distributor/shardstreams/config.go @@ -7,12 +7,13 @@ import ( ) type Config struct { - Enabled bool `yaml:"enabled" json:"enabled"` - LoggingEnabled bool `yaml:"logging_enabled" json:"logging_enabled"` + Enabled bool `yaml:"enabled" json:"enabled" doc:"description=Automatically shard streams to keep them under the per-stream rate limit. Sharding is dictated by the desired rate."` + + LoggingEnabled bool `yaml:"logging_enabled" json:"logging_enabled" doc:"description=Whether to log sharding streams behavior or not. Not recommended for production environments."` // DesiredRate is the threshold used to shard the stream into smaller pieces. // Expected to be in bytes. - DesiredRate flagext.ByteSize `yaml:"desired_rate" json:"desired_rate"` + DesiredRate flagext.ByteSize `yaml:"desired_rate" json:"desired_rate" doc:"description=Threshold used to cut a new shard. 
Default (1536KB) means if a rate is above 1536KB/s, it will be sharded into two streams."` } func (cfg *Config) RegisterFlagsWithPrefix(prefix string, fs *flag.FlagSet) { diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go index acc5864fc5573..88b613aa8db2d 100644 --- a/pkg/ingester/instance_test.go +++ b/pkg/ingester/instance_test.go @@ -1049,7 +1049,7 @@ func (f fakeLimits) AllByUserID() map[string]*validation.Limits { func TestStreamShardingUsage(t *testing.T) { setupCustomTenantLimit := func(perStreamLimit string) *validation.Limits { - shardStreamsCfg := &shardstreams.Config{Enabled: true, LoggingEnabled: true} + shardStreamsCfg := shardstreams.Config{Enabled: true, LoggingEnabled: true} shardStreamsCfg.DesiredRate.Set("6MB") //nolint:errcheck customTenantLimits := &validation.Limits{} diff --git a/pkg/ingester/limiter.go b/pkg/ingester/limiter.go index 193209a54f6b9..94c77a30be7e3 100644 --- a/pkg/ingester/limiter.go +++ b/pkg/ingester/limiter.go @@ -27,7 +27,7 @@ type Limits interface { MaxLocalStreamsPerUser(userID string) int MaxGlobalStreamsPerUser(userID string) int PerStreamRateLimit(userID string) validation.RateLimit - ShardStreams(userID string) *shardstreams.Config + ShardStreams(userID string) shardstreams.Config } // Limiter implements primitives to get the maximum number of streams diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index aef0bd440d56d..ff9c00e0ed598 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -97,7 +97,7 @@ type Config struct { CompactorConfig compactor.Config `yaml:"compactor,omitempty"` CompactorHTTPClient compactorclient.HTTPConfig `yaml:"compactor_client,omitempty" doc:"hidden"` CompactorGRPCClient compactorclient.GRPCConfig `yaml:"compactor_grpc_client,omitempty"` - LimitsConfig validation.Limits `yaml:"limits_config,omitempty"` + LimitsConfig validation.Limits `yaml:"limits_config"` Worker worker.Config `yaml:"frontend_worker,omitempty"` TableManager index.TableManagerConfig `yaml:"table_manager,omitempty"` MemberlistKV memberlist.KVConfig `yaml:"memberlist"` diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index 27e6702dc9889..a4280a2ee9e36 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -187,7 +187,7 @@ type Limits struct { // Deprecated CompactorDeletionEnabled bool `yaml:"allow_deletes" json:"allow_deletes" doc:"deprecated|description=Use deletion_mode per tenant configuration instead."` - ShardStreams *shardstreams.Config `yaml:"shard_streams" json:"shard_streams"` + ShardStreams shardstreams.Config `yaml:"shard_streams" json:"shard_streams" doc:"description=Define streams sharding behavior."` BlockedQueries []*validation.BlockedQuery `yaml:"blocked_queries,omitempty" json:"blocked_queries,omitempty"` @@ -388,7 +388,6 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { ), ) - l.ShardStreams = &shardstreams.Config{} l.ShardStreams.RegisterFlagsWithPrefix("shard-streams", f) f.IntVar(&l.VolumeMaxSeries, "limits.volume-max-series", 1000, "The default number of aggregated series or labels that can be returned from a log-volume endpoint") @@ -900,7 +899,7 @@ func (o *Overrides) DeletionMode(userID string) string { return o.getOverridesForUser(userID).DeletionMode } -func (o *Overrides) ShardStreams(userID string) *shardstreams.Config { +func (o *Overrides) ShardStreams(userID string) shardstreams.Config { return o.getOverridesForUser(userID).ShardStreams } From 67ed2f7092c8c0d97ba0bec08fde7ede65faa33f Mon Sep 17 00:00:00 2001 From: Tom Donohue Date: Wed, 8 May 2024 16:11:58 
+0100 Subject: [PATCH 11/47] fix(helm): Fix GEL image tag, bucket name and proxy URLs (#12878) Signed-off-by: Vladyslav Diachenko Co-authored-by: Vladyslav Diachenko --- docs/sources/setup/install/helm/reference.md | 5 +- production/helm/loki/Chart.yaml | 4 +- production/helm/loki/README.md | 4 +- production/helm/loki/templates/_helpers.tpl | 76 +++++++++++++++++++ .../deployment-gateway-enterprise.yaml | 14 +++- production/helm/loki/values.yaml | 11 +-- 6 files changed, 100 insertions(+), 14 deletions(-) diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index de875439bf2b8..2a538a7516175 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -2572,7 +2572,7 @@ null }, "canarySecret": null, "cluster_name": null, - "config": "{{- if .Values.enterprise.adminApi.enabled }}\n{{- if or .Values.minio.enabled (eq .Values.loki.storage.type \"s3\") (eq .Values.loki.storage.type \"gcs\") (eq .Values.loki.storage.type \"azure\") }}\nadmin_client:\n storage:\n s3:\n bucket_name: admin\n{{- end }}\n{{- end }}\nauth:\n type: {{ .Values.enterprise.adminApi.enabled | ternary \"enterprise\" \"trust\" }}\nauth_enabled: {{ .Values.loki.auth_enabled }}\ncluster_name: {{ include \"loki.clusterName\" . }}\nlicense:\n path: /etc/loki/license/license.jwt\n", + "config": "{{- if .Values.enterprise.adminApi.enabled }}\nadmin_client:\n {{ include \"enterprise-logs.adminAPIStorageConfig\" . | nindent 2 }}\n{{ end }}\nauth:\n type: {{ .Values.enterprise.adminApi.enabled | ternary \"enterprise\" \"trust\" }}\nauth_enabled: {{ .Values.loki.auth_enabled }}\ncluster_name: {{ include \"loki.clusterName\" . }}\nlicense:\n path: /etc/loki/license/license.jwt\n", "enabled": false, "externalConfigName": "", "externalLicenseName": null, @@ -2630,7 +2630,7 @@ null "tolerations": [] }, "useExternalLicense": false, - "version": "v3.0.1" + "version": "3.0.1" } @@ -5594,6 +5594,7 @@ null "userAssignedId": null }, "filesystem": { + "admin_api_directory": "/var/loki/admin", "chunks_directory": "/var/loki/chunks", "rules_directory": "/var/loki/rules" }, diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 60ea27903dbbe..595c4b5710cd5 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -1,9 +1,9 @@ apiVersion: v2 name: loki -description: Helm chart for Grafana Loki in simple, scalable mode +description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. 
type: application appVersion: 3.0.0 -version: 6.5.0 +version: 6.5.1 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index b5b5961421864..9e0dce69d8385 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,8 +1,8 @@ # loki -![Version: 6.5.0](https://img.shields.io/badge/Version-6.5.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) +![Version: 6.5.1](https://img.shields.io/badge/Version-6.5.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) -Helm chart for Grafana Loki in simple, scalable mode +Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. ## Source Code diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl index d88b86e40b960..c71c6c23aff88 100644 --- a/production/helm/loki/templates/_helpers.tpl +++ b/production/helm/loki/templates/_helpers.tpl @@ -454,6 +454,82 @@ ruler: {{- end }} {{- end }} +{{/* Enterprise Logs Admin API storage config */}} +{{- define "enterprise-logs.adminAPIStorageConfig" }} +storage: + {{- if .Values.minio.enabled }} + backend: "s3" + s3: + bucket_name: admin + {{- else if eq .Values.loki.storage.type "s3" -}} + {{- with .Values.loki.storage.s3 }} + backend: "s3" + s3: + bucket_name: {{ $.Values.loki.storage.bucketNames.admin }} + {{- end -}} + {{- else if eq .Values.loki.storage.type "gcs" -}} + {{- with .Values.loki.storage.gcs }} + backend: "gcs" + gcs: + bucket_name: {{ $.Values.loki.storage.bucketNames.admin }} + {{- end -}} + {{- else if eq .Values.loki.storage.type "azure" -}} + {{- with .Values.loki.storage.azure }} + backend: "azure" + azure: + account_name: {{ .accountName }} + {{- with .accountKey }} + account_key: {{ . }} + {{- end }} + {{- with .connectionString }} + connection_string: {{ . }} + {{- end }} + container_name: {{ $.Values.loki.storage.bucketNames.admin }} + {{- with .endpointSuffix }} + endpoint_suffix: {{ . }} + {{- end }} + {{- end -}} + {{- else if eq .Values.loki.storage.type "swift" -}} + {{- with .Values.loki.storage.swift }} + backend: "swift" + swift: + {{- with .auth_version }} + auth_version: {{ . }} + {{- end }} + auth_url: {{ .auth_url }} + {{- with .internal }} + internal: {{ . }} + {{- end }} + username: {{ .username }} + user_domain_name: {{ .user_domain_name }} + {{- with .user_domain_id }} + user_domain_id: {{ . }} + {{- end }} + {{- with .user_id }} + user_id: {{ . }} + {{- end }} + password: {{ .password }} + {{- with .domain_id }} + domain_id: {{ . 
}} + {{- end }} + domain_name: {{ .domain_name }} + project_id: {{ .project_id }} + project_name: {{ .project_name }} + project_domain_id: {{ .project_domain_id }} + project_domain_name: {{ .project_domain_name }} + region_name: {{ .region_name }} + container_name: {{ .container_name }} + max_retries: {{ .max_retries | default 3 }} + connect_timeout: {{ .connect_timeout | default "10s" }} + request_timeout: {{ .request_timeout | default "5s" }} + {{- end -}} + {{- else }} + backend: "filesystem" + filesystem: + dir: {{ .Values.loki.storage.filesystem.admin_api_directory }} + {{- end -}} +{{- end }} + {{/* Calculate the config from structured and unstructured text input */}} diff --git a/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml b/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml index 4f7dccac911ed..de8ba11058eb1 100644 --- a/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml +++ b/production/helm/loki/templates/gateway/deployment-gateway-enterprise.yaml @@ -1,3 +1,5 @@ +{{- $isDistributed := eq (include "loki.deployment.isDistributed" .) "true" -}} +{{- $isSimpleScalable := eq (include "loki.deployment.isScalable" .) "true" -}} {{- if and .Values.gateway.enabled .Values.enterprise.enabled .Values.enterprise.gelGateway }} apiVersion: apps/v1 kind: Deployment @@ -69,7 +71,7 @@ spec: - -admin.client.s3.secret-access-key={{ .Values.minio.secretKey }} - -admin.client.s3.insecure=true {{- end }} - {{- if .Values.enterpriseGateway.useDefaultProxyURLs }} + {{- if and $isDistributed .Values.enterpriseGateway.useDefaultProxyURLs }} - -gateway.proxy.default.url=http://{{ template "loki.fullname" . }}-admin-api.{{ .Release.Namespace }}.svc:3100 - -gateway.proxy.admin-api.url=http://{{ template "loki.fullname" . }}-admin-api.{{ .Release.Namespace }}.svc:3100 - -gateway.proxy.distributor.url=dns:///{{ template "loki.fullname" . }}-distributor-headless.{{ .Release.Namespace }}.svc:9095 @@ -77,6 +79,16 @@ spec: - -gateway.proxy.query-frontend.url=http://{{ template "loki.fullname" . }}-query-frontend.{{ .Release.Namespace }}.svc:3100 - -gateway.proxy.ruler.url=http://{{ template "loki.fullname" . }}-ruler.{{ .Release.Namespace }}.svc:3100 {{- end }} + {{- if and $isSimpleScalable .Values.enterpriseGateway.useDefaultProxyURLs }} + - -gateway.proxy.default.url=http://{{ template "enterprise-logs.adminApiFullname" . }}.{{ .Release.Namespace }}.svc:3100 + - -gateway.proxy.admin-api.url=http://{{ template "enterprise-logs.adminApiFullname" . }}.{{ .Release.Namespace }}.svc:3100 + - -gateway.proxy.compactor.url=http://{{ template "loki.backendFullname" . }}-headless.{{ .Release.Namespace }}.svc:3100 + - -gateway.proxy.distributor.url=dns:///{{ template "loki.writeFullname" . }}-headless.{{ .Release.Namespace }}.svc:9095 + - -gateway.proxy.ingester.url=http://{{ template "loki.writeFullname" . }}.{{ .Release.Namespace }}.svc:3100 + - -gateway.proxy.query-frontend.url=http://{{ template "loki.readFullname" . }}.{{ .Release.Namespace }}.svc:3100 + - -gateway.proxy.ruler.url=http://{{ template "loki.backendFullname" . }}-headless.{{ .Release.Namespace }}.svc:3100 + - -gateway.proxy.query-scheduler.url=http://{{ template "loki.backendFullname" . 
}}-headless.{{ .Release.Namespace }}.svc:3100 + {{- end }} {{- range $key, $value := .Values.enterpriseGateway.extraArgs }} - "-{{ $key }}={{ $value }}" {{- end }} diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 36d42cddc09c5..11e579e6c885c 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -365,6 +365,7 @@ loki: filesystem: chunks_directory: /var/loki/chunks rules_directory: /var/loki/rules + admin_api_directory: /var/loki/admin # -- Configure memcached as an external cache for chunk and results cache. Disabled by default # must enable and specify a host for each cache you would like to use. memcached: @@ -449,7 +450,7 @@ enterprise: # Enable enterprise features, license must be provided enabled: false # Default verion of GEL to deploy - version: v3.0.1 + version: 3.0.1 # -- Optional name of the GEL cluster, otherwise will use .Release.Name # The cluster name must match what is in your GEL license cluster_name: null @@ -476,13 +477,9 @@ enterprise: # enterprise specific sections of the config.yaml file config: | {{- if .Values.enterprise.adminApi.enabled }} - {{- if or .Values.minio.enabled (eq .Values.loki.storage.type "s3") (eq .Values.loki.storage.type "gcs") (eq .Values.loki.storage.type "azure") }} admin_client: - storage: - s3: - bucket_name: admin - {{- end }} - {{- end }} + {{ include "enterprise-logs.adminAPIStorageConfig" . | nindent 2 }} + {{ end }} auth: type: {{ .Values.enterprise.adminApi.enabled | ternary "enterprise" "trust" }} auth_enabled: {{ .Values.loki.auth_enabled }} From 18a558c7fd37be6911e16e539146172f02380df1 Mon Sep 17 00:00:00 2001 From: Dylan Guedes Date: Wed, 8 May 2024 19:25:42 -0300 Subject: [PATCH 12/47] docs: Don't allow running new and old querier worker grpc clients (#12916) --- docs/sources/shared/configuration.md | 8 ++++---- pkg/loki/config_wrapper.go | 22 +++++++++------------- pkg/querier/worker/worker.go | 6 +++--- 3 files changed, 16 insertions(+), 20 deletions(-) diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index 9e933b87d4de7..fe57f40daa581 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -2237,19 +2237,19 @@ The `frontend_worker` configures the worker - running within the Loki querier - [id: | default = ""] # Configures the querier gRPC client used to communicate with the -# query-frontend. Shouldn't be used in conjunction with 'grpc_client_config'. +# query-frontend. This can't be used in conjunction with 'grpc_client_config'. # The CLI flags prefix for this block configuration is: # querier.frontend-grpc-client [query_frontend_grpc_client: ] # Configures the querier gRPC client used to communicate with the query-frontend -# and with the query-scheduler if 'query_scheduler_grpc_client' isn't defined. -# This shouldn't be used if 'query_frontend_grpc_client' is defined. +# and with the query-scheduler. This can't be used in conjunction with +# 'query_frontend_grpc_client' or 'query_scheduler_grpc_client'. # The CLI flags prefix for this block configuration is: querier.frontend-client [grpc_client_config: ] # Configures the querier gRPC client used to communicate with the -# query-scheduler. If not defined, 'grpc_client_config' is used instead. +# query-scheduler. This can't be used in conjunction with 'grpc_client_config'. 
# The CLI flags prefix for this block configuration is: # querier.scheduler-grpc-client [query_scheduler_grpc_client: ] diff --git a/pkg/loki/config_wrapper.go b/pkg/loki/config_wrapper.go index 2a4789fb9e60f..4c8da5de23aea 100644 --- a/pkg/loki/config_wrapper.go +++ b/pkg/loki/config_wrapper.go @@ -689,21 +689,17 @@ func applyChunkRetain(cfg, defaults *ConfigWrapper) { } func applyCommonQuerierWorkerGRPCConfig(cfg, defaults *ConfigWrapper) error { - if !reflect.DeepEqual(cfg.Worker.OldQueryFrontendGRPCClientConfig, defaults.Worker.OldQueryFrontendGRPCClientConfig) { - // User is using the old grpc configuration. - - if reflect.DeepEqual(cfg.Worker.NewQueryFrontendGRPCClientConfig, defaults.Worker.NewQueryFrontendGRPCClientConfig) { - // User is using the old grpc configuration only, we can just copy it to the new grpc client struct. - cfg.Worker.NewQueryFrontendGRPCClientConfig = cfg.Worker.OldQueryFrontendGRPCClientConfig - } else { - // User is using both, old and new way of configuring the grpc client, so we throw an error. - return fmt.Errorf("both `grpc_client_config` and `query_frontend_grpc_client` are set at the same time. Please use only one of them") - } + usingNewFrontendCfg := !reflect.DeepEqual(cfg.Worker.NewQueryFrontendGRPCClientConfig, defaults.Worker.NewQueryFrontendGRPCClientConfig) + usingNewSchedulerCfg := !reflect.DeepEqual(cfg.Worker.QuerySchedulerGRPCClientConfig, defaults.Worker.QuerySchedulerGRPCClientConfig) + usingOldFrontendCfg := !reflect.DeepEqual(cfg.Worker.OldQueryFrontendGRPCClientConfig, defaults.Worker.OldQueryFrontendGRPCClientConfig) - if reflect.DeepEqual(cfg.Worker.QuerySchedulerGRPCClientConfig, defaults.Worker.QuerySchedulerGRPCClientConfig) { - // Since the scheduler grpc client is not set, we can just copy the old query frontend grpc client to the scheduler grpc client. - cfg.Worker.QuerySchedulerGRPCClientConfig = cfg.Worker.OldQueryFrontendGRPCClientConfig + if usingOldFrontendCfg { + if usingNewFrontendCfg || usingNewSchedulerCfg { + return fmt.Errorf("both `grpc_client_config` and (`query_frontend_grpc_client` or `query_scheduler_grpc_client`) are set at the same time. Please use only `query_frontend_grpc_client` and `query_scheduler_grpc_client`") } + cfg.Worker.NewQueryFrontendGRPCClientConfig = cfg.Worker.OldQueryFrontendGRPCClientConfig + cfg.Worker.QuerySchedulerGRPCClientConfig = cfg.Worker.OldQueryFrontendGRPCClientConfig } + return nil } diff --git a/pkg/querier/worker/worker.go b/pkg/querier/worker/worker.go index 7d7b46dc814f5..0c13bdd6df9d0 100644 --- a/pkg/querier/worker/worker.go +++ b/pkg/querier/worker/worker.go @@ -30,10 +30,10 @@ type Config struct { QuerierID string `yaml:"id"` - NewQueryFrontendGRPCClientConfig grpcclient.Config `yaml:"query_frontend_grpc_client" doc:"description=Configures the querier gRPC client used to communicate with the query-frontend. Shouldn't be used in conjunction with 'grpc_client_config'."` - OldQueryFrontendGRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=Configures the querier gRPC client used to communicate with the query-frontend and with the query-scheduler if 'query_scheduler_grpc_client' isn't defined. This shouldn't be used if 'query_frontend_grpc_client' is defined."` + NewQueryFrontendGRPCClientConfig grpcclient.Config `yaml:"query_frontend_grpc_client" doc:"description=Configures the querier gRPC client used to communicate with the query-frontend. 
This can't be used in conjunction with 'grpc_client_config'."` + OldQueryFrontendGRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=Configures the querier gRPC client used to communicate with the query-frontend and with the query-scheduler. This can't be used in conjunction with 'query_frontend_grpc_client' or 'query_scheduler_grpc_client'."` - QuerySchedulerGRPCClientConfig grpcclient.Config `yaml:"query_scheduler_grpc_client" doc:"description=Configures the querier gRPC client used to communicate with the query-scheduler. If not defined, 'grpc_client_config' is used instead."` + QuerySchedulerGRPCClientConfig grpcclient.Config `yaml:"query_scheduler_grpc_client" doc:"description=Configures the querier gRPC client used to communicate with the query-scheduler. This can't be used in conjunction with 'grpc_client_config'."` } func (cfg *Config) RegisterFlags(f *flag.FlagSet) { From 3a46d3717d982c3718dc6deae1a2ab88c5b71f83 Mon Sep 17 00:00:00 2001 From: MarkDaveny <168091250+MarkDaveny@users.noreply.github.com> Date: Thu, 9 May 2024 20:54:19 +0800 Subject: [PATCH 13/47] style: fix function names (#12817) Signed-off-by: MarkDaveny --- pkg/ruler/evaluator_remote_test.go | 2 +- pkg/storage/bloom/v1/filter/partitioned.go | 2 +- pkg/storage/chunk/client/grpc/grpc_server_mock_test.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/ruler/evaluator_remote_test.go b/pkg/ruler/evaluator_remote_test.go index 515b8ea306528..0b11978a7f7ed 100644 --- a/pkg/ruler/evaluator_remote_test.go +++ b/pkg/ruler/evaluator_remote_test.go @@ -195,7 +195,7 @@ func TestRemoteEvalEmptyScalarResponse(t *testing.T) { require.Empty(t, res.Data) } -// TestRemoteEvalEmptyVectorResponse validates that an empty vector response is valid and does not cause an error +// TestRemoteEvalVectorResponse validates that an empty vector response is valid and does not cause an error func TestRemoteEvalVectorResponse(t *testing.T) { defaultLimits := defaultLimitsTestConfig() limits, err := validation.NewOverrides(defaultLimits, nil) diff --git a/pkg/storage/bloom/v1/filter/partitioned.go b/pkg/storage/bloom/v1/filter/partitioned.go index c3eb949840ed7..b487c1d0d6db9 100644 --- a/pkg/storage/bloom/v1/filter/partitioned.go +++ b/pkg/storage/bloom/v1/filter/partitioned.go @@ -56,7 +56,7 @@ type PartitionedBloomFilter struct { optimalCount uint // optimal number of distinct items that can be stored in this filter } -// NewPartitionedBloomFilterWithEstimates creates a new partitioned Bloom filter +// NewPartitionedBloomFilterWithCapacity creates a new partitioned Bloom filter // with a specific capacity func NewPartitionedBloomFilterWithCapacity(m uint, fpRate float64) *PartitionedBloomFilter { var ( diff --git a/pkg/storage/chunk/client/grpc/grpc_server_mock_test.go b/pkg/storage/chunk/client/grpc/grpc_server_mock_test.go index e5ed3456fd081..4f8cb14762669 100644 --- a/pkg/storage/chunk/client/grpc/grpc_server_mock_test.go +++ b/pkg/storage/chunk/client/grpc/grpc_server_mock_test.go @@ -128,7 +128,7 @@ func (s server) UpdateTable(_ context.Context, request *UpdateTableRequest) (*em return &empty.Empty{}, err } -// NewStorageClient returns a new StorageClient. +// NewTestStorageClient returns a new StorageClient. 
func NewTestStorageClient(cfg Config, schemaCfg config.SchemaConfig) (*StorageClient, error) { grpcClient, _, err := connectToGrpcServer(cfg.Address) if err != nil { From d16a3bf139af16fa53ff2222f5a96741be372bf3 Mon Sep 17 00:00:00 2001 From: Paul Rogers <129207811+paul1r@users.noreply.github.com> Date: Thu, 9 May 2024 12:15:53 -0400 Subject: [PATCH 14/47] test: First pass at unregistering metrics so that we can run multiple tests (#12927) --- .../deletion/delete_requests_client_test.go | 3 +-- pkg/compactor/deletion/metrics.go | 5 ++++ pkg/loki/loki_test.go | 23 +++++++++++++++++++ pkg/pattern/flush_test.go | 3 +-- pkg/scheduler/scheduler_test.go | 2 +- .../client/congestion/congestion_test.go | 16 +++++++++---- .../client/congestion/controller_test.go | 5 ++++ .../chunk/client/congestion/metrics.go | 5 ++++ tools/tsdb/migrate-versions/main_test.go | 1 + 9 files changed, 54 insertions(+), 9 deletions(-) diff --git a/pkg/compactor/deletion/delete_requests_client_test.go b/pkg/compactor/deletion/delete_requests_client_test.go index 2268914711963..299b4661cc94c 100644 --- a/pkg/compactor/deletion/delete_requests_client_test.go +++ b/pkg/compactor/deletion/delete_requests_client_test.go @@ -7,12 +7,11 @@ import ( "testing" "time" - "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) func TestGetCacheGenNumberForUser(t *testing.T) { - deleteClientMetrics := NewDeleteRequestClientMetrics(prometheus.DefaultRegisterer) + deleteClientMetrics := NewDeleteRequestClientMetrics(nil) t.Run("it requests results from the compactor client", func(t *testing.T) { compactorClient := mockCompactorClient{ diff --git a/pkg/compactor/deletion/metrics.go b/pkg/compactor/deletion/metrics.go index 9d89f46c88d9d..c6477062167d0 100644 --- a/pkg/compactor/deletion/metrics.go +++ b/pkg/compactor/deletion/metrics.go @@ -12,6 +12,11 @@ type DeleteRequestClientMetrics struct { deleteRequestsLookupsFailedTotal prometheus.Counter } +func (m DeleteRequestClientMetrics) Unregister() { + prometheus.Unregister(m.deleteRequestsLookupsTotal) + prometheus.Unregister(m.deleteRequestsLookupsFailedTotal) +} + func NewDeleteRequestClientMetrics(r prometheus.Registerer) *DeleteRequestClientMetrics { m := DeleteRequestClientMetrics{} diff --git a/pkg/loki/loki_test.go b/pkg/loki/loki_test.go index a4e6ff73ca565..b29d2aad22065 100644 --- a/pkg/loki/loki_test.go +++ b/pkg/loki/loki_test.go @@ -11,6 +11,12 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/collectors/version" + + "github.com/grafana/loki/v3/pkg/util/constants" + "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/server" "github.com/stretchr/testify/assert" @@ -251,4 +257,21 @@ schema_config: require.NoError(t, err) require.Equal(t, string(bBytes), "abc") assert.True(t, customHandlerInvoked) + unregisterLokiMetrics(loki) +} + +func unregisterLokiMetrics(loki *Loki) { + loki.ClientMetrics.Unregister() + loki.deleteClientMetrics.Unregister() + prometheus.Unregister(version.NewCollector(constants.Loki)) + prometheus.Unregister(collectors.NewGoCollector( + collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll), + )) + //TODO Update DSKit to have a method to unregister these metrics + prometheus.Unregister(loki.Metrics.TCPConnections) + prometheus.Unregister(loki.Metrics.TCPConnectionsLimit) + prometheus.Unregister(loki.Metrics.RequestDuration) + 
prometheus.Unregister(loki.Metrics.ReceivedMessageSize) + prometheus.Unregister(loki.Metrics.SentMessageSize) + prometheus.Unregister(loki.Metrics.InflightRequests) } diff --git a/pkg/pattern/flush_test.go b/pkg/pattern/flush_test.go index 4d70eea5c3e10..9ee4bd436992b 100644 --- a/pkg/pattern/flush_test.go +++ b/pkg/pattern/flush_test.go @@ -12,7 +12,6 @@ import ( "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" "github.com/grafana/dskit/user" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/labels" "github.com/stretchr/testify/require" @@ -23,7 +22,7 @@ import ( ) func TestSweepInstance(t *testing.T) { - ing, err := New(defaultIngesterTestConfig(t), "foo", prometheus.DefaultRegisterer, log.NewNopLogger()) + ing, err := New(defaultIngesterTestConfig(t), "foo", nil, log.NewNopLogger()) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), ing) //nolint:errcheck err = services.StartAndAwaitRunning(context.Background(), ing) diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 7f8d88e4d679e..939aa2b18bb93 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -27,7 +27,7 @@ func TestScheduler_setRunState(t *testing.T) { // we make a Scheduler with the things required to avoid nil pointers s := Scheduler{ log: util_log.Logger, - schedulerRunning: promauto.With(prometheus.DefaultRegisterer).NewGauge(prometheus.GaugeOpts{ + schedulerRunning: promauto.With(nil).NewGauge(prometheus.GaugeOpts{ Name: "cortex_query_scheduler_running", Help: "Value will be 1 if the scheduler is in the ReplicationSet and actively receiving/processing requests", }), diff --git a/pkg/storage/chunk/client/congestion/congestion_test.go b/pkg/storage/chunk/client/congestion/congestion_test.go index c9c9b0d398095..4f86c888d54af 100644 --- a/pkg/storage/chunk/client/congestion/congestion_test.go +++ b/pkg/storage/chunk/client/congestion/congestion_test.go @@ -9,11 +9,13 @@ import ( func TestZeroValueConstruction(t *testing.T) { cfg := Config{} - ctrl := NewController(cfg, log.NewNopLogger(), NewMetrics(t.Name(), cfg)) + m := NewMetrics(t.Name(), cfg) + ctrl := NewController(cfg, log.NewNopLogger(), m) require.IsType(t, &NoopController{}, ctrl) require.IsType(t, &NoopRetrier{}, ctrl.getRetrier()) require.IsType(t, &NoopHedger{}, ctrl.getHedger()) + m.Unregister() } func TestAIMDConstruction(t *testing.T) { @@ -22,11 +24,13 @@ func TestAIMDConstruction(t *testing.T) { Strategy: "aimd", }, } - ctrl := NewController(cfg, log.NewNopLogger(), NewMetrics(t.Name(), cfg)) + m := NewMetrics(t.Name(), cfg) + ctrl := NewController(cfg, log.NewNopLogger(), m) require.IsType(t, &AIMDController{}, ctrl) require.IsType(t, &NoopRetrier{}, ctrl.getRetrier()) require.IsType(t, &NoopHedger{}, ctrl.getHedger()) + m.Unregister() } func TestRetrierConstruction(t *testing.T) { @@ -35,11 +39,13 @@ func TestRetrierConstruction(t *testing.T) { Strategy: "limited", }, } - ctrl := NewController(cfg, log.NewNopLogger(), NewMetrics(t.Name(), cfg)) + m := NewMetrics(t.Name(), cfg) + ctrl := NewController(cfg, log.NewNopLogger(), m) require.IsType(t, &NoopController{}, ctrl) require.IsType(t, &LimitedRetrier{}, ctrl.getRetrier()) require.IsType(t, &NoopHedger{}, ctrl.getHedger()) + m.Unregister() } func TestCombinedConstruction(t *testing.T) { @@ -51,11 +57,13 @@ func TestCombinedConstruction(t *testing.T) { Strategy: "limited", }, } - ctrl := NewController(cfg, log.NewNopLogger(), NewMetrics(t.Name(), 
cfg)) + m := NewMetrics(t.Name(), cfg) + ctrl := NewController(cfg, log.NewNopLogger(), m) require.IsType(t, &AIMDController{}, ctrl) require.IsType(t, &LimitedRetrier{}, ctrl.getRetrier()) require.IsType(t, &NoopHedger{}, ctrl.getHedger()) + m.Unregister() } func TestHedgerConstruction(t *testing.T) { diff --git a/pkg/storage/chunk/client/congestion/controller_test.go b/pkg/storage/chunk/client/congestion/controller_test.go index 74620d334ff9f..c280b2214718d 100644 --- a/pkg/storage/chunk/client/congestion/controller_test.go +++ b/pkg/storage/chunk/client/congestion/controller_test.go @@ -46,6 +46,7 @@ func TestRequestNoopRetry(t *testing.T) { require.EqualValues(t, 2, testutil.ToFloat64(metrics.requests)) require.EqualValues(t, 0, testutil.ToFloat64(metrics.retries)) + metrics.Unregister() } func TestRequestZeroLimitedRetry(t *testing.T) { @@ -74,6 +75,7 @@ func TestRequestZeroLimitedRetry(t *testing.T) { require.EqualValues(t, 1, testutil.ToFloat64(metrics.requests)) require.EqualValues(t, 0, testutil.ToFloat64(metrics.retries)) + metrics.Unregister() } func TestRequestLimitedRetry(t *testing.T) { @@ -109,6 +111,7 @@ func TestRequestLimitedRetry(t *testing.T) { require.EqualValues(t, 1, testutil.ToFloat64(metrics.retriesExceeded)) require.EqualValues(t, 2, testutil.ToFloat64(metrics.retries)) require.EqualValues(t, 4, testutil.ToFloat64(metrics.requests)) + metrics.Unregister() } func TestRequestLimitedRetryNonRetryableErr(t *testing.T) { @@ -139,6 +142,7 @@ func TestRequestLimitedRetryNonRetryableErr(t *testing.T) { require.EqualValues(t, 0, testutil.ToFloat64(metrics.retries)) require.EqualValues(t, 1, testutil.ToFloat64(metrics.nonRetryableErrors)) require.EqualValues(t, 1, testutil.ToFloat64(metrics.requests)) + metrics.Unregister() } func TestAIMDReducedThroughput(t *testing.T) { @@ -212,6 +216,7 @@ func TestAIMDReducedThroughput(t *testing.T) { // should have registered some congestion latency in stats require.NotZero(t, statsCtx.Store().CongestionControlLatency) + metrics.Unregister() } func runAndMeasureRate(ctx context.Context, ctrl Controller, duration time.Duration) (float64, float64) { diff --git a/pkg/storage/chunk/client/congestion/metrics.go b/pkg/storage/chunk/client/congestion/metrics.go index 78684a4e40893..2e41d5e0bd5e5 100644 --- a/pkg/storage/chunk/client/congestion/metrics.go +++ b/pkg/storage/chunk/client/congestion/metrics.go @@ -17,6 +17,11 @@ type Metrics struct { func (m Metrics) Unregister() { prometheus.Unregister(m.currentLimit) + prometheus.Unregister(m.backoffSec) + prometheus.Unregister(m.requests) + prometheus.Unregister(m.retries) + prometheus.Unregister(m.nonRetryableErrors) + prometheus.Unregister(m.retriesExceeded) } // NewMetrics creates metrics to be used for monitoring congestion control. 
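The congestion-control and deletion-client changes above follow one pattern: every collector a constructor registers gets a matching Unregister call, and tests that do not need scraping pass a nil registerer so nothing is registered at all. A minimal, self-contained sketch of that pattern (hypothetical metric names, not the Loki collectors themselves):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Metrics bundles the collectors so registration and unregistration stay symmetric.
type Metrics struct {
	requests prometheus.Counter
	retries  prometheus.Counter
}

// NewMetrics registers the collectors with the given registerer unless it is nil,
// mirroring how a test can opt out of registration entirely.
func NewMetrics(reg prometheus.Registerer) *Metrics {
	m := &Metrics{
		requests: prometheus.NewCounter(prometheus.CounterOpts{Name: "example_requests_total", Help: "Total requests."}),
		retries:  prometheus.NewCounter(prometheus.CounterOpts{Name: "example_retries_total", Help: "Total retries."}),
	}
	if reg != nil {
		reg.MustRegister(m.requests, m.retries)
	}
	return m
}

// Unregister removes the collectors from the default registry so the next test
// can register the same metric names without a duplicate-registration panic.
func (m *Metrics) Unregister() {
	prometheus.Unregister(m.requests)
	prometheus.Unregister(m.retries)
}

func main() {
	m := NewMetrics(prometheus.DefaultRegisterer)
	m.requests.Inc()
	m.Unregister()

	// A second construction with the same metric names now succeeds.
	m2 := NewMetrics(prometheus.DefaultRegisterer)
	fmt.Println(m2 != nil)
	m2.Unregister()
}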
diff --git a/tools/tsdb/migrate-versions/main_test.go b/tools/tsdb/migrate-versions/main_test.go index 7ac68521545bd..62519e04f61fc 100644 --- a/tools/tsdb/migrate-versions/main_test.go +++ b/tools/tsdb/migrate-versions/main_test.go @@ -141,4 +141,5 @@ func TestMigrateTables(t *testing.T) { } }) } + clientMetrics.Unregister() } From 4d761acd85b90cbdcafdf8d2547f0db14f6ae4dd Mon Sep 17 00:00:00 2001 From: Paulin Todev Date: Thu, 9 May 2024 18:56:08 +0100 Subject: [PATCH 15/47] fix(promtail): Fix bug with Promtail config reloading getting stuck indefinitely (#12795) Signed-off-by: Paulin Todev --- .../pkg/promtail/targets/file/filetarget.go | 35 ++++++-- .../promtail/targets/file/filetarget_test.go | 87 +++++++++++++++++++ 2 files changed, 116 insertions(+), 6 deletions(-) diff --git a/clients/pkg/promtail/targets/file/filetarget.go b/clients/pkg/promtail/targets/file/filetarget.go index 0ade51902b492..ffa168fde43d2 100644 --- a/clients/pkg/promtail/targets/file/filetarget.go +++ b/clients/pkg/promtail/targets/file/filetarget.go @@ -25,6 +25,8 @@ const ( FilenameLabel = "filename" ) +var errFileTargetStopped = errors.New("File target is stopped") + // Config describes behavior for Target type Config struct { SyncPeriod time.Duration `mapstructure:"sync_period" yaml:"sync_period"` @@ -223,6 +225,11 @@ func (t *FileTarget) run() { } case <-ticker.C: err := t.sync() + if errors.Is(err, errFileTargetStopped) { + // This file target has been stopped. + // This is normal and there is no need to log an error. + return + } if err != nil { level.Error(t.logger).Log("msg", "error running sync function", "error", err) } @@ -291,14 +298,20 @@ func (t *FileTarget) sync() error { t.watchesMutex.Lock() toStartWatching := missing(t.watches, dirs) t.watchesMutex.Unlock() - t.startWatching(toStartWatching) + err := t.startWatching(toStartWatching) + if errors.Is(err, errFileTargetStopped) { + return err + } // Remove any directories which no longer need watching. t.watchesMutex.Lock() toStopWatching := missing(dirs, t.watches) t.watchesMutex.Unlock() - t.stopWatching(toStopWatching) + err = t.stopWatching(toStopWatching) + if errors.Is(err, errFileTargetStopped) { + return err + } // fsnotify.Watcher doesn't allow us to see what is currently being watched so we have to track it ourselves. 
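The startWatching/stopWatching changes below replace a plain channel send with a select on the target's quit channel, so Stop() cannot deadlock against a full, undrained event channel. A stripped-down sketch of that shutdown pattern (hypothetical names, not the Promtail types):

package main

import (
	"errors"
	"fmt"
)

var errStopped = errors.New("target is stopped")

type target struct {
	quit   chan struct{}
	events chan string
}

// send either delivers the event or gives up as soon as the target is stopping,
// instead of blocking forever on a full events channel that nobody reads anymore.
func (t *target) send(ev string) error {
	select {
	case <-t.quit:
		return errStopped
	case t.events <- ev:
		return nil
	}
}

func main() {
	t := &target{
		quit:   make(chan struct{}),
		events: make(chan string, 1),
	}
	fmt.Println(t.send("watch /var/log")) // buffer slot free: <nil>

	close(t.quit)                         // Stop(): the events channel is no longer drained
	fmt.Println(t.send("watch /tmp/log")) // channel full, but send returns errStopped instead of hanging
}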
t.watchesMutex.Lock() @@ -321,32 +334,42 @@ func (t *FileTarget) sync() error { return nil } -func (t *FileTarget) startWatching(dirs map[string]struct{}) { +func (t *FileTarget) startWatching(dirs map[string]struct{}) error { for dir := range dirs { if _, ok := t.getWatch(dir); ok { continue } level.Info(t.logger).Log("msg", "watching new directory", "directory", dir) - t.targetEventHandler <- fileTargetEvent{ + select { + case <-t.quit: + return errFileTargetStopped + case t.targetEventHandler <- fileTargetEvent{ path: dir, eventType: fileTargetEventWatchStart, + }: } } + return nil } -func (t *FileTarget) stopWatching(dirs map[string]struct{}) { +func (t *FileTarget) stopWatching(dirs map[string]struct{}) error { for dir := range dirs { if _, ok := t.getWatch(dir); !ok { continue } level.Info(t.logger).Log("msg", "removing directory from watcher", "directory", dir) - t.targetEventHandler <- fileTargetEvent{ + select { + case <-t.quit: + return errFileTargetStopped + case t.targetEventHandler <- fileTargetEvent{ path: dir, eventType: fileTargetEventWatchStop, + }: } } + return nil } func (t *FileTarget) startTailing(ps []string) { diff --git a/clients/pkg/promtail/targets/file/filetarget_test.go b/clients/pkg/promtail/targets/file/filetarget_test.go index 579ea19e2e56e..caf33395ba201 100644 --- a/clients/pkg/promtail/targets/file/filetarget_test.go +++ b/clients/pkg/promtail/targets/file/filetarget_test.go @@ -336,6 +336,93 @@ func TestFileTarget_StopsTailersCleanly_Parallel(t *testing.T) { ps.Stop() } +// Make sure that Stop() doesn't hang if FileTarget is waiting on a channel send. +func TestFileTarget_StopAbruptly(t *testing.T) { + w := log.NewSyncWriter(os.Stderr) + logger := log.NewLogfmtLogger(w) + + dirName := newTestLogDirectories(t) + positionsFileName := filepath.Join(dirName, "positions.yml") + logDir1 := filepath.Join(dirName, "log1") + logDir2 := filepath.Join(dirName, "log2") + logDir3 := filepath.Join(dirName, "log3") + + logfile1 := filepath.Join(logDir1, "test1.log") + logfile2 := filepath.Join(logDir2, "test1.log") + logfile3 := filepath.Join(logDir3, "test1.log") + + ps, err := positions.New(logger, positions.Config{ + SyncPeriod: 10 * time.Millisecond, + PositionsFile: positionsFileName, + }) + require.NoError(t, err) + + client := fake.New(func() {}) + defer client.Stop() + + // fakeHandler has to be a buffered channel so that we can call the len() function on it. + // We need to call len() to check if the channel is full. + fakeHandler := make(chan fileTargetEvent, 1) + pathToWatch := filepath.Join(dirName, "**", "*.log") + registry := prometheus.NewRegistry() + target, err := NewFileTarget(NewMetrics(registry), logger, client, ps, pathToWatch, "", nil, nil, &Config{ + SyncPeriod: 10 * time.Millisecond, + }, DefaultWatchConig, nil, fakeHandler, "", nil) + assert.NoError(t, err) + + // Create a directory, still nothing is watched. + err = os.MkdirAll(logDir1, 0750) + assert.NoError(t, err) + _, err = os.Create(logfile1) + assert.NoError(t, err) + + // There should be only one WatchStart event in the channel so far. + ftEvent := <-fakeHandler + require.Equal(t, fileTargetEventWatchStart, ftEvent.eventType) + + requireEventually(t, func() bool { + return target.getReadersLen() == 1 + }, "expected 1 tailer to be created") + + require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(` + # HELP promtail_files_active_total Number of active files. 
+ # TYPE promtail_files_active_total gauge + promtail_files_active_total 1 + `), "promtail_files_active_total")) + + // Create two directories - one more than the buffer of fakeHandler, + // so that the file target hands until we call Stop(). + err = os.MkdirAll(logDir2, 0750) + assert.NoError(t, err) + _, err = os.Create(logfile2) + assert.NoError(t, err) + + err = os.MkdirAll(logDir3, 0750) + assert.NoError(t, err) + _, err = os.Create(logfile3) + assert.NoError(t, err) + + // Wait until the file target is waiting on a channel send due to a full channel buffer. + requireEventually(t, func() bool { + return len(fakeHandler) == 1 + }, "expected an event in the fakeHandler channel") + + // If FileHandler works well, then it will stop waiting for + // the blocked fakeHandler and stop cleanly. + // This is why this time we don't drain fakeHandler. + requireEventually(t, func() bool { + target.Stop() + ps.Stop() + return true + }, "expected FileTarget not to hang") + + require.NoError(t, testutil.GatherAndCompare(registry, bytes.NewBufferString(` + # HELP promtail_files_active_total Number of active files. + # TYPE promtail_files_active_total gauge + promtail_files_active_total 0 + `), "promtail_files_active_total")) +} + func TestFileTargetPathExclusion(t *testing.T) { w := log.NewSyncWriter(os.Stderr) logger := log.NewLogfmtLogger(w) From a46d14fb05ea14dd39095d2d71cd037acc2dfc51 Mon Sep 17 00:00:00 2001 From: jackyin <648588267@qq.com> Date: Fri, 10 May 2024 16:55:01 +0800 Subject: [PATCH 16/47] fix: Optimize regular initialization (#12926) --- pkg/querier/querier.go | 14 +++++---- pkg/querier/querier_test.go | 29 +++++++++++++++++++ pkg/ruler/base/notifier.go | 7 +++-- .../resultscache/pipelinewrapper_keygen.go | 1 + .../pipelinewrapper_keygen_test.go | 6 ++-- 5 files changed, 47 insertions(+), 10 deletions(-) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 0223b89b1b388..12429e68a3e0c 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -50,9 +50,15 @@ const ( // before checking if a new entry is available (to avoid spinning the CPU in a continuous // check loop) tailerWaitEntryThrottle = time.Second / 2 + + idPattern = `^(?:(?:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})|(?:(?:\{)?[0-9a-fA-F]{8}(?:-?[0-9a-fA-F]{4}){3}-?[0-9a-fA-F]{12}(?:\})?)|(\d+(?:\.\d+)?))$` ) -var nowFunc = func() time.Time { return time.Now() } +var ( + nowFunc = func() time.Time { return time.Now() } + + idRegexp = regexp.MustCompile(idPattern) +) type interval struct { start, end time.Time @@ -1046,12 +1052,8 @@ func (q *SingleTenantQuerier) isLabelRelevant(label string, values []string, sta // containsAllIDTypes filters out all UUID, GUID and numeric types. 
Returns false if even one value is not of the type func containsAllIDTypes(values []string) bool { - pattern := `^(?:(?:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})|(?:(?:\{)?[0-9a-fA-F]{8}(?:-?[0-9a-fA-F]{4}){3}-?[0-9a-fA-F]{12}(?:\})?)|(\d+(?:\.\d+)?))$` - - re := regexp.MustCompile(pattern) - for _, v := range values { - if !re.MatchString(v) { + if !idRegexp.MatchString(v) { return false } } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index e6c228f04920e..2dfd3d8974314 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1425,6 +1425,35 @@ func TestQuerier_isLabelRelevant(t *testing.T) { } } +func TestQuerier_containsAllIDTypes(t *testing.T) { + for _, tc := range []struct { + name string + values []string + expected bool + }{ + { + name: "all uuidv4 values are valid", + values: []string{"751e8ee6-b377-4b2e-b7b5-5508fbe980ef", "6b7e2663-8ecb-42e1-8bdc-0c5de70185b3", "2e1e67ff-be4f-47b8-aee1-5d67ff1ddabf", "c95b2d62-74ed-4ed7-a8a1-eb72fc67946e"}, + expected: true, + }, + { + name: "one uuidv4 values are invalid", + values: []string{"w", "5076e837-cd8d-4dd7-95ff-fecb087dccf6", "2e2a6554-1744-4399-b89a-88ae79c27096", "d3c31248-ec0c-4bc4-b11c-8fb1cfb42e62"}, + expected: false, + }, + { + name: "all uuidv4 values are invalid", + values: []string{"w", "x", "y", "z"}, + expected: false, + }, + } { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.expected, containsAllIDTypes(tc.values)) + }) + + } +} + func TestQuerier_DetectedLabels(t *testing.T) { manyValues := []string{} now := time.Now() diff --git a/pkg/ruler/base/notifier.go b/pkg/ruler/base/notifier.go index 8fea76be13963..5c6524447bfed 100644 --- a/pkg/ruler/base/notifier.go +++ b/pkg/ruler/base/notifier.go @@ -25,7 +25,11 @@ import ( // TODO: Instead of using the same metrics for all notifiers, // should we have separate metrics for each discovery.NewManager? 
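Both changes in this patch move a regexp.MustCompile call out of the request path and into a package-level var, so the pattern is parsed once at initialization instead of on every call. A reduced sketch of the resulting shape (simplified pattern, not Loki's full ID regexp):

package main

import (
	"fmt"
	"regexp"
)

// Compiled once at package initialization; the previous version rebuilt this
// regexp inside the function on every invocation.
var uuidRe = regexp.MustCompile(`^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$`)

// containsOnlyUUIDs reports whether every value matches the precompiled pattern.
func containsOnlyUUIDs(values []string) bool {
	for _, v := range values {
		if !uuidRe.MatchString(v) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(containsOnlyUUIDs([]string{"751e8ee6-b377-4b2e-b7b5-5508fbe980ef"})) // true
	fmt.Println(containsOnlyUUIDs([]string{"not-an-id"}))                            // false
}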
-var sdMetrics map[string]discovery.DiscovererMetrics +var ( + sdMetrics map[string]discovery.DiscovererMetrics + + srvDNSregexp = regexp.MustCompile(`^_.+._.+`) +) func init() { var err error @@ -112,7 +116,6 @@ func buildNotifierConfig(amConfig *ruler_config.AlertManagerConfig, externalLabe amURLs := strings.Split(amConfig.AlertmanagerURL, ",") validURLs := make([]*url.URL, 0, len(amURLs)) - srvDNSregexp := regexp.MustCompile(`^_.+._.+`) for _, h := range amURLs { url, err := url.Parse(h) if err != nil { diff --git a/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen.go b/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen.go index e3681d961effe..b87fed3734215 100644 --- a/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen.go +++ b/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen.go @@ -2,6 +2,7 @@ package resultscache import ( "context" + "github.com/grafana/loki/v3/pkg/util/httpreq" ) diff --git a/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen_test.go b/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen_test.go index 621a77d859072..b113df932f38c 100644 --- a/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen_test.go +++ b/pkg/storage/chunk/cache/resultscache/pipelinewrapper_keygen_test.go @@ -2,9 +2,11 @@ package resultscache import ( "context" - "github.com/grafana/loki/v3/pkg/util/httpreq" - "github.com/stretchr/testify/require" "testing" + + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/v3/pkg/util/httpreq" ) func TestPipelineWrapperKeygen(t *testing.T) { From cb1f5d9fca2908bd31a3c6bef38d49fe084d2939 Mon Sep 17 00:00:00 2001 From: Salva Corts Date: Fri, 10 May 2024 15:27:37 +0200 Subject: [PATCH 17/47] fix: Defer closing blocks iter after checking error from loadWorkForGap (#12934) --- pkg/bloomcompactor/controller.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pkg/bloomcompactor/controller.go b/pkg/bloomcompactor/controller.go index c706f9ae72b56..f9defdc1fdfbc 100644 --- a/pkg/bloomcompactor/controller.go +++ b/pkg/bloomcompactor/controller.go @@ -384,6 +384,10 @@ func (s *SimpleBloomController) buildGaps( // to try and accelerate bloom creation level.Debug(logger).Log("msg", "loading series and blocks for gap", "blocks", len(gap.blocks)) seriesItr, blocksIter, err := s.loadWorkForGap(ctx, table, tenant, plan.tsdb, gap) + if err != nil { + level.Error(logger).Log("msg", "failed to get series and blocks", "err", err) + return nil, errors.Wrap(err, "failed to get series and blocks") + } // TODO(owen-d): more elegant error handling than sync.OnceFunc closeBlocksIter := sync.OnceFunc(func() { @@ -393,11 +397,6 @@ func (s *SimpleBloomController) buildGaps( }) defer closeBlocksIter() - if err != nil { - level.Error(logger).Log("msg", "failed to get series and blocks", "err", err) - return nil, errors.Wrap(err, "failed to get series and blocks") - } - // Blocks are built consuming the series iterator. For observability, we wrap the series iterator // with a counter iterator to count the number of times Next() is called on it. // This is used to observe the number of series that are being processed. 
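The controller.go hunk above moves the error check ahead of the deferred cleanup, so the iterators returned by loadWorkForGap are only closed when they were actually created. A minimal sketch of that ordering, with hypothetical names (loadWork, iter, build are illustrative stand-ins, not the bloomcompactor types):

```go
package main

import (
	"errors"
	"fmt"
)

type iter struct{ name string }

func (it *iter) Close() { fmt.Println("closed", it.name) }

// loadWork stands in for a constructor that may fail; on error the
// returned iterator is nil and must not be closed.
func loadWork(fail bool) (*iter, error) {
	if fail {
		return nil, errors.New("load failed")
	}
	return &iter{name: "blocks"}, nil
}

func build(fail bool) error {
	it, err := loadWork(fail)
	if err != nil {
		// Returning before any defer is registered means Close is
		// never called on a nil iterator.
		return fmt.Errorf("failed to get series and blocks: %w", err)
	}
	defer it.Close() // safe: only reached when it is non-nil

	// ... consume the iterator ...
	return nil
}

func main() {
	fmt.Println(build(true))  // failure path: nothing to close
	fmt.Println(build(false)) // success path: Close runs via defer
}
```

In the actual patch the close is additionally wrapped in sync.OnceFunc, so the same cleanup can run early once the blocks iterator has been fully consumed and still be safe to invoke again from the defer.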
From 5ada92b190c671055bb09ca2dd234b6bac49289e Mon Sep 17 00:00:00 2001 From: Vladyslav Diachenko <82767850+vlad-diachenko@users.noreply.github.com> Date: Fri, 10 May 2024 18:16:18 +0300 Subject: [PATCH 18/47] fix(helm): fixed ingress paths mapping (#12932) --- docs/sources/setup/install/helm/reference.md | 94 ++++++++++++++++---- production/helm/loki/CHANGELOG.md | 4 + production/helm/loki/Chart.yaml | 2 +- production/helm/loki/README.md | 2 +- production/helm/loki/templates/_helpers.tpl | 68 +++++++++----- production/helm/loki/values.yaml | 49 +++++++--- 6 files changed, 165 insertions(+), 54 deletions(-) diff --git a/docs/sources/setup/install/helm/reference.md b/docs/sources/setup/install/helm/reference.md index 2a538a7516175..53101a4832143 100644 --- a/docs/sources/setup/install/helm/reference.md +++ b/docs/sources/setup/install/helm/reference.md @@ -5086,29 +5086,37 @@ null "ingressClassName": "", "labels": {}, "paths": { - "read": [ - "/api/prom/tail", - "/loki/api/v1/tail", - "/loki/api", - "/api/prom/rules", - "/loki/api/v1/rules", - "/prometheus/api/v1/rules", - "/prometheus/api/v1/alerts" - ], - "singleBinary": [ + "distributor": [ "/api/prom/push", "/loki/api/v1/push", + "/otlp/v1/logs" + ], + "queryFrontend": [ + "/api/prom/query", + "/api/prom/label", + "/api/prom/series", "/api/prom/tail", + "/loki/api/v1/query", + "/loki/api/v1/query_range", "/loki/api/v1/tail", - "/loki/api", + "/loki/api/v1/label", + "/loki/api/v1/labels", + "/loki/api/v1/series", + "/loki/api/v1/index/stats", + "/loki/api/v1/index/volume", + "/loki/api/v1/index/volume_range", + "/loki/api/v1/format_query", + "/loki/api/v1/detected_fields", + "/loki/api/v1/detected_labels", + "/loki/api/v1/patterns" + ], + "ruler": [ "/api/prom/rules", + "/api/prom/api/v1/rules", + "/api/prom/api/v1/alerts", "/loki/api/v1/rules", "/prometheus/api/v1/rules", "/prometheus/api/v1/alerts" - ], - "write": [ - "/api/prom/push", - "/loki/api/v1/push" ] }, "tls": [] @@ -5125,6 +5133,62 @@ null "loki.example.com" ] + + + + ingress.paths.distributor + list + Paths that are exposed by Loki Distributor. If deployment mode is Distributed, the requests are forwarded to the service: `{{"loki.distributorFullname"}}`. If deployment mode is SimpleScalable, the requests are forwarded to write k8s service: `{{"loki.writeFullname"}}`. If deployment mode is SingleBinary, the requests are forwarded to the central/single k8s service: `{{"loki.singleBinaryFullname"}}` +
+[
+  "/api/prom/push",
+  "/loki/api/v1/push",
+  "/otlp/v1/logs"
+]
+
+ + + + ingress.paths.queryFrontend + list + Paths that are exposed by Loki Query Frontend. If deployment mode is Distributed, the requests are forwarded to the service: `{{"loki.queryFrontendFullname"}}`. If deployment mode is SimpleScalable, the requests are forwarded to write k8s service: `{{"loki.readFullname"}}`. If deployment mode is SingleBinary, the requests are forwarded to the central/single k8s service: `{{"loki.singleBinaryFullname"}}` +
+[
+  "/api/prom/query",
+  "/api/prom/label",
+  "/api/prom/series",
+  "/api/prom/tail",
+  "/loki/api/v1/query",
+  "/loki/api/v1/query_range",
+  "/loki/api/v1/tail",
+  "/loki/api/v1/label",
+  "/loki/api/v1/labels",
+  "/loki/api/v1/series",
+  "/loki/api/v1/index/stats",
+  "/loki/api/v1/index/volume",
+  "/loki/api/v1/index/volume_range",
+  "/loki/api/v1/format_query",
+  "/loki/api/v1/detected_fields",
+  "/loki/api/v1/detected_labels",
+  "/loki/api/v1/patterns"
+]
+
+ + + + ingress.paths.ruler + list + Paths that are exposed by Loki Ruler. If deployment mode is Distributed, the requests are forwarded to the service: `{{"loki.rulerFullname"}}`. If deployment mode is SimpleScalable, the requests are forwarded to k8s service: `{{"loki.backendFullname"}}`. If deployment mode is SimpleScalable but `read.legacyReadTarget` is `true`, the requests are forwarded to k8s service: `{{"loki.readFullname"}}`. If deployment mode is SingleBinary, the requests are forwarded to the central/single k8s service: `{{"loki.singleBinaryFullname"}}` +
+[
+  "/api/prom/rules",
+  "/api/prom/api/v1/rules",
+  "/api/prom/api/v1/alerts",
+  "/loki/api/v1/rules",
+  "/prometheus/api/v1/rules",
+  "/prometheus/api/v1/alerts"
+]
+
diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index 8a9f00cf7753e..1606c89914f88 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 6.5.2 + +- [BUGFIX] Fixed Ingress routing for all deployment modes. + ## 6.5.0 - [CHANGE] Changed version of Grafana Enterprise Logs to v3.0.1 diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 595c4b5710cd5..989a54d146a1d 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. type: application appVersion: 3.0.0 -version: 6.5.1 +version: 6.5.2 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 9e0dce69d8385..55a7256c72f7f 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 6.5.1](https://img.shields.io/badge/Version-6.5.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) +![Version: 6.5.2](https://img.shields.io/badge/Version-6.5.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.0.0](https://img.shields.io/badge/AppVersion-3.0.0-informational?style=flat-square) Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. diff --git a/production/helm/loki/templates/_helpers.tpl b/production/helm/loki/templates/_helpers.tpl index c71c6c23aff88..572ef9a5e7846 100644 --- a/production/helm/loki/templates/_helpers.tpl +++ b/production/helm/loki/templates/_helpers.tpl @@ -607,33 +607,68 @@ Return if ingress supports pathType. Generate list of ingress service paths based on deployment type */}} {{- define "loki.ingress.servicePaths" -}} -{{- if (eq (include "loki.deployment.isScalable" .) "true") -}} +{{- if (eq (include "loki.deployment.isSingleBinary" .) "true") -}} +{{- include "loki.ingress.singleBinaryServicePaths" . }} +{{- else if (eq (include "loki.deployment.isDistributed" .) "true") -}} +{{- include "loki.ingress.distributedServicePaths" . }} +{{- else if and (eq (include "loki.deployment.isScalable" .) "true") (not .Values.read.legacyReadTarget ) -}} {{- include "loki.ingress.scalableServicePaths" . }} {{- else -}} -{{- include "loki.ingress.singleBinaryServicePaths" . }} +{{- include "loki.ingress.legacyScalableServicePaths" . }} +{{- end -}} {{- end -}} + + +{{/* +Ingress service paths for distributed deployment +*/}} +{{- define "loki.ingress.distributedServicePaths" -}} +{{- $distributorServiceName := include "loki.distributorFullname" . }} +{{- include "loki.ingress.servicePath" (dict "ctx" . 
"serviceName" $distributorServiceName "paths" .Values.ingress.paths.distributor )}} +{{- $queryFrontendServiceName := include "loki.queryFrontendFullname" . }} +{{- include "loki.ingress.servicePath" (dict "ctx" . "serviceName" $queryFrontendServiceName "paths" .Values.ingress.paths.queryFrontend )}} +{{- $rulerServiceName := include "loki.rulerFullname" . }} +{{- include "loki.ingress.servicePath" (dict "ctx" . "serviceName" $rulerServiceName "paths" .Values.ingress.paths.ruler)}} {{- end -}} {{/* -Ingress service paths for scalable deployment +Ingress service paths for legacy simple scalable deployment when backend components were part of read component. */}} {{- define "loki.ingress.scalableServicePaths" -}} -{{- include "loki.ingress.servicePath" (dict "ctx" . "svcName" "read" "paths" .Values.ingress.paths.read )}} -{{- include "loki.ingress.servicePath" (dict "ctx" . "svcName" "write" "paths" .Values.ingress.paths.write )}} +{{- $readServiceName := include "loki.readFullname" . }} +{{- include "loki.ingress.servicePath" (dict "ctx" . "serviceName" $readServiceName "paths" .Values.ingress.paths.queryFrontend )}} +{{- $writeServiceName := include "loki.writeFullname" . }} +{{- include "loki.ingress.servicePath" (dict "ctx" . "serviceName" $writeServiceName "paths" .Values.ingress.paths.distributor )}} +{{- $backendServiceName := include "loki.backendFullname" . }} +{{- include "loki.ingress.servicePath" (dict "ctx" . "serviceName" $backendServiceName "paths" .Values.ingress.paths.ruler )}} +{{- end -}} + +{{/* +Ingress service paths for legacy simple scalable deployment +*/}} +{{- define "loki.ingress.legacyScalableServicePaths" -}} +{{- $readServiceName := include "loki.readFullname" . }} +{{- include "loki.ingress.servicePath" (dict "ctx" . "serviceName" $readServiceName "paths" .Values.ingress.paths.queryFrontend )}} +{{- include "loki.ingress.servicePath" (dict "ctx" . "serviceName" $readServiceName "paths" .Values.ingress.paths.ruler )}} +{{- $writeServiceName := include "loki.writeFullname" . }} +{{- include "loki.ingress.servicePath" (dict "ctx" . "serviceName" $writeServiceName "paths" .Values.ingress.paths.distributor )}} {{- end -}} {{/* Ingress service paths for single binary deployment */}} {{- define "loki.ingress.singleBinaryServicePaths" -}} -{{- include "loki.ingress.servicePath" (dict "ctx" . "svcName" "singleBinary" "paths" .Values.ingress.paths.singleBinary )}} +{{- $serviceName := include "loki.singleBinaryFullname" . }} +{{- include "loki.ingress.servicePath" (dict "ctx" . "serviceName" $serviceName "paths" .Values.ingress.paths.distributor )}} +{{- include "loki.ingress.servicePath" (dict "ctx" . "serviceName" $serviceName "paths" .Values.ingress.paths.queryFrontend )}} +{{- include "loki.ingress.servicePath" (dict "ctx" . "serviceName" $serviceName "paths" .Values.ingress.paths.ruler )}} {{- end -}} {{/* Ingress service path helper function Params: ctx = . context - svcName = service name without the "loki.fullname" part (ie. 
read, write) + serviceName = fully qualified k8s service name paths = list of url paths to allow ingress for */}} {{- define "loki.ingress.servicePath" -}} @@ -645,33 +680,18 @@ Params: pathType: Prefix {{- end }} backend: - {{- $serviceName := include "loki.ingress.serviceName" (dict "ctx" $.ctx "svcName" $.svcName) }} {{- if $ingressApiIsStable }} service: - name: {{ $serviceName }} + name: {{ $.serviceName }} port: number: {{ $.ctx.Values.loki.server.http_listen_port }} {{- else }} - serviceName: {{ $serviceName }} + serviceName: {{ $.serviceName }} servicePort: {{ $.ctx.Values.loki.server.http_listen_port }} {{- end -}} {{- end -}} {{- end -}} -{{/* -Ingress service name helper function -Params: - ctx = . context - svcName = service name without the "loki.fullname" part (ie. read, write) -*/}} -{{- define "loki.ingress.serviceName" -}} -{{- if (eq .svcName "singleBinary") }} -{{- printf "%s" (include "loki.singleBinaryFullname" .ctx) }} -{{- else }} -{{- printf "%s-%s" (include "loki.name" .ctx) .svcName }} -{{- end -}} -{{- end -}} - {{/* Create the service endpoint including port for MinIO. */}} diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index 11e579e6c885c..3edfc24ba34fb 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -1131,24 +1131,47 @@ ingress: labels: {} # blackbox.monitoring.exclude: "true" paths: - write: - - /api/prom/push - - /loki/api/v1/push - read: - - /api/prom/tail - - /loki/api/v1/tail - - /loki/api - - /api/prom/rules - - /loki/api/v1/rules - - /prometheus/api/v1/rules - - /prometheus/api/v1/alerts - singleBinary: + # -- Paths that are exposed by Loki Distributor. + # If deployment mode is Distributed, the requests are forwarded to the service: `{{"loki.distributorFullname"}}`. + # If deployment mode is SimpleScalable, the requests are forwarded to write k8s service: `{{"loki.writeFullname"}}`. + # If deployment mode is SingleBinary, the requests are forwarded to the central/single k8s service: `{{"loki.singleBinaryFullname"}}` + distributor: - /api/prom/push - /loki/api/v1/push + - /otlp/v1/logs + # -- Paths that are exposed by Loki Query Frontend. + # If deployment mode is Distributed, the requests are forwarded to the service: `{{"loki.queryFrontendFullname"}}`. + # If deployment mode is SimpleScalable, the requests are forwarded to write k8s service: `{{"loki.readFullname"}}`. + # If deployment mode is SingleBinary, the requests are forwarded to the central/single k8s service: `{{"loki.singleBinaryFullname"}}` + queryFrontend: + - /api/prom/query + # this path covers labels and labelValues endpoints + - /api/prom/label + - /api/prom/series - /api/prom/tail + - /loki/api/v1/query + - /loki/api/v1/query_range - /loki/api/v1/tail - - /loki/api + # this path covers labels and labelValues endpoints + - /loki/api/v1/label + - /loki/api/v1/labels + - /loki/api/v1/series + - /loki/api/v1/index/stats + - /loki/api/v1/index/volume + - /loki/api/v1/index/volume_range + - /loki/api/v1/format_query + - /loki/api/v1/detected_fields + - /loki/api/v1/detected_labels + - /loki/api/v1/patterns + # -- Paths that are exposed by Loki Ruler. + # If deployment mode is Distributed, the requests are forwarded to the service: `{{"loki.rulerFullname"}}`. + # If deployment mode is SimpleScalable, the requests are forwarded to k8s service: `{{"loki.backendFullname"}}`. + # If deployment mode is SimpleScalable but `read.legacyReadTarget` is `true`, the requests are forwarded to k8s service: `{{"loki.readFullname"}}`. 
+ # If deployment mode is SingleBinary, the requests are forwarded to the central/single k8s service: `{{"loki.singleBinaryFullname"}}` + ruler: - /api/prom/rules + - /api/prom/api/v1/rules + - /api/prom/api/v1/alerts - /loki/api/v1/rules - /prometheus/api/v1/rules - /prometheus/api/v1/alerts From b05c4f728839f0de1b8386394c99d54c5fc03cc1 Mon Sep 17 00:00:00 2001 From: Kaviraj Kanagaraj Date: Mon, 13 May 2024 08:41:17 +0200 Subject: [PATCH 19/47] chore(logging): Add entry's timestamp when rejected with `too far behind` (#12933) Signed-off-by: Kaviraj --- pkg/chunkenc/interface.go | 10 +++++++--- pkg/chunkenc/interface_test.go | 4 +++- pkg/ingester/stream.go | 2 +- pkg/ingester/stream_test.go | 3 ++- 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go index b96d9f705d092..8d6f5e1e8dd60 100644 --- a/pkg/chunkenc/interface.go +++ b/pkg/chunkenc/interface.go @@ -24,6 +24,10 @@ var ( ) type errTooFarBehind struct { + // original timestmap of the entry itself. + entryTs time.Time + + // cutoff is the oldest acceptable timstamp of the `stream` that entry belongs to. cutoff time.Time } @@ -32,12 +36,12 @@ func IsErrTooFarBehind(err error) bool { return ok } -func ErrTooFarBehind(cutoff time.Time) error { - return &errTooFarBehind{cutoff: cutoff} +func ErrTooFarBehind(entryTs, cutoff time.Time) error { + return &errTooFarBehind{entryTs: entryTs, cutoff: cutoff} } func (m *errTooFarBehind) Error() string { - return "entry too far behind, oldest acceptable timestamp is: " + m.cutoff.Format(time.RFC3339) + return fmt.Sprintf("entry too far behind, entry timestamp is: %s, oldest acceptable timestamp is: %s", m.entryTs.Format(time.RFC3339), m.cutoff.Format(time.RFC3339)) } func IsOutOfOrderErr(err error) bool { diff --git a/pkg/chunkenc/interface_test.go b/pkg/chunkenc/interface_test.go index daea36cb38e72..ed81c4d3604e4 100644 --- a/pkg/chunkenc/interface_test.go +++ b/pkg/chunkenc/interface_test.go @@ -31,7 +31,9 @@ func TestParseEncoding(t *testing.T) { } func TestIsOutOfOrderErr(t *testing.T) { - for _, err := range []error{ErrOutOfOrder, ErrTooFarBehind(time.Now())} { + now := time.Now() + + for _, err := range []error{ErrOutOfOrder, ErrTooFarBehind(now, now)} { require.Equal(t, true, IsOutOfOrderErr(err)) } } diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go index d7a29b73e802d..6bf75dfa1ac54 100644 --- a/pkg/ingester/stream.go +++ b/pkg/ingester/stream.go @@ -394,7 +394,7 @@ func (s *stream) validateEntries(entries []logproto.Entry, isReplay, rateLimitWh // The validity window for unordered writes is the highest timestamp present minus 1/2 * max-chunk-age. 
cutoff := highestTs.Add(-s.cfg.MaxChunkAge / 2) if !isReplay && s.unorderedWrites && !highestTs.IsZero() && cutoff.After(entries[i].Timestamp) { - failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], chunkenc.ErrTooFarBehind(cutoff)}) + failedEntriesWithError = append(failedEntriesWithError, entryWithError{&entries[i], chunkenc.ErrTooFarBehind(entries[i].Timestamp, cutoff)}) s.writeFailures.Log(s.tenant, fmt.Errorf("%w for stream %s", failedEntriesWithError[len(failedEntriesWithError)-1].e, s.labels)) outOfOrderSamples++ outOfOrderBytes += lineBytes diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go index 26eef4e3a7936..af877bf88da9e 100644 --- a/pkg/ingester/stream_test.go +++ b/pkg/ingester/stream_test.go @@ -84,8 +84,9 @@ func TestMaxReturnedStreamsErrors(t *testing.T) { var expected bytes.Buffer for i := 0; i < tc.expectErrs; i++ { fmt.Fprintf(&expected, - "entry with timestamp %s ignored, reason: 'entry too far behind, oldest acceptable timestamp is: %s',\n", + "entry with timestamp %s ignored, reason: 'entry too far behind, entry timestamp is: %s, oldest acceptable timestamp is: %s',\n", time.Unix(int64(i), 0).String(), + newLines[i].Timestamp.Format(time.RFC3339), time.Unix(int64(numLogs), 0).Format(time.RFC3339), ) } From 3cc28aaf0ec08373fb104327827e6a062807e7ff Mon Sep 17 00:00:00 2001 From: Shantanu Alshi Date: Mon, 13 May 2024 15:56:08 +0530 Subject: [PATCH 20/47] fix: panics when ingester response is nil (#12946) --- pkg/querier/ingester_querier.go | 4 ++++ pkg/querier/querier.go | 12 ++++++------ pkg/querier/querier_test.go | 27 +++++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 6 deletions(-) diff --git a/pkg/querier/ingester_querier.go b/pkg/querier/ingester_querier.go index e99fe6882df4c..c18ca77930667 100644 --- a/pkg/querier/ingester_querier.go +++ b/pkg/querier/ingester_querier.go @@ -380,6 +380,10 @@ func (q *IngesterQuerier) DetectedLabel(ctx context.Context, req *logproto.Detec "response", resp) } + if thisIngester == nil { + continue + } + for label, thisIngesterValues := range thisIngester.Labels { var combinedValues []string allIngesterValues, isLabelPresent := labelMap[label] diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 12429e68a3e0c..bafbe334cdf75 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -985,14 +985,14 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto. 
}, nil } - // append static labels before so they are in sorted order - for l := range staticLabels { - if values, present := ingesterLabels.Labels[l]; present { - detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: l, Cardinality: uint64(len(values.Values))}) + if ingesterLabels != nil { + // append static labels before so they are in sorted order + for l := range staticLabels { + if values, present := ingesterLabels.Labels[l]; present { + detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: l, Cardinality: uint64(len(values.Values))}) + } } - } - if ingesterLabels != nil { for label, values := range ingesterLabels.Labels { if q.isLabelRelevant(label, values.Values, staticLabels) { combinedValues := values.Values diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 2dfd3d8974314..66370c34460be 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1714,4 +1714,31 @@ func TestQuerier_DetectedLabels(t *testing.T) { assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "pod", Cardinality: 4}) assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "namespace", Cardinality: 60}) }) + + t.Run("no panics with ingester response is nil", func(t *testing.T) { + ingesterClient := newQuerierClientMock() + storeClient := newStoreMock() + + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil, nil) + storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return([]string{}, nil) + request := logproto.DetectedLabelsRequest{ + Start: &now, + End: &now, + Query: "", + } + + querier, err := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + storeClient, limits) + require.NoError(t, err) + + _, err = querier.DetectedLabels(ctx, &request) + require.NoError(t, err) + }) } From 5cd850e0d02151c6f9c6285189b887b4929cfa12 Mon Sep 17 00:00:00 2001 From: Paul Rogers <129207811+paul1r@users.noreply.github.com> Date: Mon, 13 May 2024 07:38:46 -0400 Subject: [PATCH 21/47] fix: Fix for how the loop sync is done (#12941) --- .../stores/shipper/indexshipper/downloads/table_manager.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go b/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go index 9f2401c209d0e..612f1d1eaa2a9 100644 --- a/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go +++ b/pkg/storage/stores/shipper/indexshipper/downloads/table_manager.go @@ -111,14 +111,13 @@ func NewTableManager(cfg Config, openIndexFileFunc index.OpenIndexFileFunc, inde return nil, err } + // Increment the WaitGroup counter here before starting the goroutine + tm.wg.Add(1) go tm.loop() return tm, nil } func (tm *tableManager) loop() { - tm.tablesMtx.Lock() - tm.wg.Add(1) - tm.tablesMtx.Unlock() defer tm.wg.Done() syncTicker := time.NewTicker(tm.cfg.SyncInterval) From 7630f33ecfea6aa520c4259449eb890125a72a61 Mon Sep 17 00:00:00 2001 From: benclive Date: Mon, 13 May 2024 14:34:44 +0100 Subject: [PATCH 22/47] test: Added more test data for pattern detection tests (#12920) --- pkg/pattern/drain/drain_test.go | 449 +- pkg/pattern/drain/testdata/agent-logfmt.txt | 2 +- pkg/pattern/drain/testdata/calico.txt | 1000 ++++ pkg/pattern/drain/testdata/custom.txt | 0 
.../drain/testdata/distributor-logfmt.txt | 5000 +++++++++++++++++ pkg/pattern/drain/testdata/journald.txt | 1000 ++++ pkg/pattern/drain/testdata/kafka.txt | 1000 ++++ pkg/pattern/drain/testdata/kubernetes.txt | 1000 ++++ pkg/pattern/drain/testdata/vault.txt | 1000 ++++ 9 files changed, 10357 insertions(+), 94 deletions(-) create mode 100644 pkg/pattern/drain/testdata/calico.txt delete mode 100644 pkg/pattern/drain/testdata/custom.txt create mode 100644 pkg/pattern/drain/testdata/distributor-logfmt.txt create mode 100644 pkg/pattern/drain/testdata/journald.txt create mode 100644 pkg/pattern/drain/testdata/kafka.txt create mode 100644 pkg/pattern/drain/testdata/kubernetes.txt create mode 100644 pkg/pattern/drain/testdata/vault.txt diff --git a/pkg/pattern/drain/drain_test.go b/pkg/pattern/drain/drain_test.go index ef7754c4ed57e..72b80aeb67d34 100644 --- a/pkg/pattern/drain/drain_test.go +++ b/pkg/pattern/drain/drain_test.go @@ -20,79 +20,334 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { }{ { // High variation leads to many patterns including some that are too generic (many tokens matched) and some that are too specific (too few matchers) - name: `Generate patterns on high variation logfmt logs`, + name: "Generate patterns on high variation logfmt logs", drain: New(DefaultConfig()), - inputFile: `testdata/agent-logfmt.txt`, + inputFile: "testdata/agent-logfmt.txt", patterns: []string{ - `ts=2024-04-16T15:10:43.192290389Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg="Adding target" key="/var/log/pods/*19a1cce8-5f04-46e0-a124-292b0dd9b343/testcoordinator/*.log:{batch_kubernetes_io_controller_uid=\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\", batch_kubernetes_io_job_name=\"testcoordinator-job-2665838\", container=\"testcoordinator\", controller_uid=\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\", job=\"k6-cloud/testcoordinator\", job_name=\"testcoordinator-job-2665838\", name=\"testcoordinator\", namespace=\"k6-cloud\", pod=\"testcoordinator-job-2665838-9g8ds\"}"`, - `<_> <_> level=info component=logs logs_config=default <_> target" <_> <_> <_> <_> <_> <_>`, - `<_> caller=filetarget.go:192 level=info component=logs logs_config=default msg="filetarget: watcher closed, tailer stopped, positions saved" <_>`, - `<_> caller=tailer.go:164 level=info component=logs logs_config=default component=tailer msg="tail routine: tail channel closed, stopping tailer" <_> reason=null`, - `<_> caller=tailer.go:207 level=info component=logs logs_config=default component=tailer msg="skipping update of position for a file which does not currently exist" <_>`, - `<_> caller=log.go:168 component=logs logs_config=default level=info msg="Successfully reopened <_>`, - `<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="bufio.Scanner: token too long"`, - `<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg="received file watcher event" <_> op=CREATE`, - `<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg="failed to decode logfmt" err="logfmt syntax error at pos <_> on line 1: unexpected '\"'"`, - `<_> <_> level=info component=logs logs_config=default <_> <_> <_> <_> <_>`, - `<_> caller=log.go:168 component=logs logs_config=default level=info <_> <_> <_> <_> <_>`, - `<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg="watching new directory" 
<_>`, - `<_> <_> level=info component=logs logs_config=default <_> target" <_> conprof=\"true\", <_> <_> job=\"hosted-grafana/grafana\", name=\"grafana\", namespace=\"hosted-grafana\", <_> plan=\"free\", <_> <_> <_> <_> <_>`, - `<_> level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines <_> <_>`, - `2024-04-16 15:10:42.555 ts=2024-04-16T15:10:42.555230437Z level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines node_id=prometheus.scrape.stack_378175_cloudwatch_notags duration=38.545339ms`, + "ts=2024-04-16T15:10:43.192290389Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg=\"Adding target\" key=\"/var/log/pods/*19a1cce8-5f04-46e0-a124-292b0dd9b343/testcoordinator/*.log:{batch_kubernetes_io_controller_uid=\\\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\\\", batch_kubernetes_io_job_name=\\\"testcoordinator-job-2665838\\\", container=\\\"testcoordinator\\\", controller_uid=\\\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\\\", job=\\\"k6-cloud/testcoordinator\\\", job_name=\\\"testcoordinator-job-2665838\\\", name=\\\"testcoordinator\\\", namespace=\\\"k6-cloud\\\", pod=\\\"testcoordinator-job-2665838-9g8ds\\\"}\"", + "<_> <_> level=info component=logs logs_config=default <_> target\" <_> <_> <_> <_> <_> <_>", + "<_> caller=filetarget.go:192 level=info component=logs logs_config=default msg=\"filetarget: watcher closed, tailer stopped, positions saved\" <_>", + "<_> caller=tailer.go:164 level=info component=logs logs_config=default component=tailer msg=\"tail routine: tail channel closed, stopping tailer\" <_> reason=null", + "<_> caller=tailer.go:207 level=info component=logs logs_config=default component=tailer msg=\"skipping update of position for a file which does not currently exist\" <_>", + "<_> caller=log.go:168 component=logs logs_config=default level=info msg=\"Successfully reopened <_>", + "<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg=\"failed to decode logfmt\" err=\"bufio.Scanner: token too long\"", + "<_> caller=filetargetmanager.go:181 level=info component=logs logs_config=default msg=\"received file watcher event\" <_> op=CREATE", + "<_> caller=logfmt.go:139 level=error component=logs logs_config=default component=file_pipeline component=stage type=logfmt msg=\"failed to decode logfmt\" err=\"logfmt syntax error at pos <_> on line 1: unexpected '\\\"'\"", + "<_> <_> level=info component=logs logs_config=default <_> <_> <_> <_> <_>", + "<_> caller=log.go:168 component=logs logs_config=default level=info <_> <_> <_> <_> <_>", + "<_> caller=filetarget.go:313 level=info component=logs logs_config=default msg=\"watching new directory\" <_>", + "<_> <_> level=info component=logs logs_config=default <_> target\" <_> conprof=\\\"true\\\", <_> <_> job=\\\"hosted-grafana/grafana\\\", name=\\\"grafana\\\", namespace=\\\"hosted-grafana\\\", <_> plan=\\\"free\\\", <_> <_> <_> <_> <_>", + "<_> level=info msg=\"finished node evaluation\" controller_id=module.http.cloudwatch_pipelines <_> <_>", }, }, { // Lower variation leads to fewer patterns including some with limited value (single lines, no matchers) - name: `Generate patterns on low variation logfmt logs`, + name: "Generate patterns on low variation logfmt logs", drain: New(DefaultConfig()), - inputFile: `testdata/ingester-logfmt.txt`, + inputFile: "testdata/ingester-logfmt.txt", patterns: []string{ - `<_> caller=head.go:216 level=debug tenant=987678 msg="profile is empty 
after delta computation" metricName=memory`, - `ts=2024-04-17T09:52:46.363974185Z caller=http.go:194 level=debug traceID=1b48f5156a61ca69 msg="GET /debug/pprof/delta_mutex (200) 1.161082ms"`, - `<_> caller=http.go:194 level=debug <_> <_> msg="POST /ingester.v1.IngesterService/Push (200) <_>`, // A perfect log line: Abstracted the variable part but kept the constants. + "<_> caller=head.go:216 level=debug tenant=987678 msg=\"profile is empty after delta computation\" metricName=memory", + "ts=2024-04-17T09:52:46.363974185Z caller=http.go:194 level=debug traceID=1b48f5156a61ca69 msg=\"GET /debug/pprof/delta_mutex (200) 1.161082ms\"", + "<_> caller=http.go:194 level=debug <_> <_> msg=\"POST /ingester.v1.IngesterService/Push (200) <_>", // A perfect log line: Abstracted the variable part but kept the constants. }, }, { // Lower variation logs in json leads to a high number of patterns with very few matchers - name: `Generate patterns on json formatted logs`, + name: "Generate patterns on json formatted logs", drain: New(DefaultConfig()), - inputFile: `testdata/drone-json.txt`, + inputFile: "testdata/drone-json.txt", patterns: []string{ - `<_> capacity <_>`, - `<_> capacity changes <_>`, - `{"id":"D4Oh1ivB6cdLWa08","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:48:52Z"}`, - `{"id":"q62wCcIkEOueqFKF","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:03:28Z"}`, - `{"id":"m6SpYHzdXrDAFqDR","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:02:58Z"}`, - `{"id":"T0I8Dsnw3uSi3Gal","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:02:28Z"}`, - `{"id":"9eA72xOtx8kzMhXn","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:01:58Z"}`, - `{"id":"pet7QVfO1yE8fk56","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:01:28Z"}`, - `{"id":"15eSzaEG0enf86Kl","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:00:57Z"}`, - `{"id":"JO1OT5ADoNA8NYqr","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T15:00:27Z"}`, - `{"id":"Xz2OCJhgeBSRFyoN","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:59:57Z"}`, - `{"id":"pPc2ORUhHAhFgBg3","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:59:27Z"}`, - `{"id":"4G6Srn6lSwzYrx19","level":"debug","max-pool":4,"min-pool":0,"msg":"check 
capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:58:57Z"}`, - `{"id":"1Lu90T1fWzsWOKlc","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:58:27Z"}`, - `{"id":"4XjwwNoOwZFaWePQ","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:57:57Z"}`, - `{"id":"IQy23J3NON0BV10V","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:57:26Z"}`, - `{"id":"FQ8wCQfaR9W387cH","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:56:56Z"}`, - `{"id":"Hhwn7ecXjxF67DG6","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:56:26Z"}`, - `{"id":"luflyGZvZnLzhQEH","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:55:56Z"}`, - `{"id":"q20GZcvyzMwrTGx5","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:55:26Z"}`, - `{"id":"3K61Yf6ImKYexoFx","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:54:56Z"}`, - `{"id":"SmbOO0l5aADX9BaQ","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:54:23Z"}`, - `{"id":"96TvvsMzSkkaW8oW","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:53:53Z"}`, - `{"id":"C7aYn8cb4NCrkkYI","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:53:23Z"}`, - `{"id":"CMG7ZwwYqNPBonAn","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:52:53Z"}`, - `{"id":"focV9BzODwRbWwKE","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:52:23Z"}`, - `{"id":"HphRnJOM8uYohf1p","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:51:53Z"}`, - `{"id":"m3n8GndhG45uGIQA","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:51:23Z"}`, - `{"id":"nTO38tWtnvRWRl1G","level":"debug","max-pool":4,"min-pool":0,"msg":"check 
capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:50:52Z"}`, - `{"id":"5qEIzErDfiALVPAN","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:50:22Z"}`, - `{"id":"q61oHTtF4MMiQVGH","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:49:52Z"}`, - `{"id":"4rNxIlhDKxGgzBHe","level":"debug","max-pool":4,"min-pool":0,"msg":"check capacity","pending-builds":0,"running-builds":0,"server-buffer":0,"server-capacity":0,"server-count":0,"time":"2024-04-16T14:49:22Z"}`, - `<_> server <_>`, - `<_> unfinished <_>`, - `<_> <_> (flow; linux; helm)"}`, + "<_> capacity <_>", + "<_> capacity changes <_>", + "{\"id\":\"D4Oh1ivB6cdLWa08\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:48:52Z\"}", + "{\"id\":\"q62wCcIkEOueqFKF\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:03:28Z\"}", + "{\"id\":\"m6SpYHzdXrDAFqDR\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:02:58Z\"}", + "{\"id\":\"T0I8Dsnw3uSi3Gal\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:02:28Z\"}", + "{\"id\":\"9eA72xOtx8kzMhXn\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:01:58Z\"}", + "{\"id\":\"pet7QVfO1yE8fk56\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:01:28Z\"}", + "{\"id\":\"15eSzaEG0enf86Kl\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:00:57Z\"}", + "{\"id\":\"JO1OT5ADoNA8NYqr\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T15:00:27Z\"}", + "{\"id\":\"Xz2OCJhgeBSRFyoN\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:59:57Z\"}", + "{\"id\":\"pPc2ORUhHAhFgBg3\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:59:27Z\"}", + "{\"id\":\"4G6Srn6lSwzYrx19\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check 
capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:58:57Z\"}", + "{\"id\":\"1Lu90T1fWzsWOKlc\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:58:27Z\"}", + "{\"id\":\"4XjwwNoOwZFaWePQ\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:57:57Z\"}", + "{\"id\":\"IQy23J3NON0BV10V\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:57:26Z\"}", + "{\"id\":\"FQ8wCQfaR9W387cH\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:56:56Z\"}", + "{\"id\":\"Hhwn7ecXjxF67DG6\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:56:26Z\"}", + "{\"id\":\"luflyGZvZnLzhQEH\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:55:56Z\"}", + "{\"id\":\"q20GZcvyzMwrTGx5\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:55:26Z\"}", + "{\"id\":\"3K61Yf6ImKYexoFx\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:54:56Z\"}", + "{\"id\":\"SmbOO0l5aADX9BaQ\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:54:23Z\"}", + "{\"id\":\"96TvvsMzSkkaW8oW\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:53:53Z\"}", + "{\"id\":\"C7aYn8cb4NCrkkYI\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:53:23Z\"}", + "{\"id\":\"CMG7ZwwYqNPBonAn\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:52:53Z\"}", + "{\"id\":\"focV9BzODwRbWwKE\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:52:23Z\"}", + "{\"id\":\"HphRnJOM8uYohf1p\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check 
capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:51:53Z\"}", + "{\"id\":\"m3n8GndhG45uGIQA\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:51:23Z\"}", + "{\"id\":\"nTO38tWtnvRWRl1G\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:50:52Z\"}", + "{\"id\":\"5qEIzErDfiALVPAN\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:50:22Z\"}", + "{\"id\":\"q61oHTtF4MMiQVGH\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:49:52Z\"}", + "{\"id\":\"4rNxIlhDKxGgzBHe\",\"level\":\"debug\",\"max-pool\":4,\"min-pool\":0,\"msg\":\"check capacity\",\"pending-builds\":0,\"running-builds\":0,\"server-buffer\":0,\"server-capacity\":0,\"server-count\":0,\"time\":\"2024-04-16T14:49:22Z\"}", + "<_> server <_>", + "<_> unfinished <_>", + "<_> <_> (flow; linux; helm)\"}", + }, + }, + { + name: "Patterns for distributor logs", + drain: New(DefaultConfig()), + inputFile: "testdata/distributor-logfmt.txt", + patterns: []string{ + `<_> caller=http.go:194 level=debug <_> <_> msg="POST <_> <_> <_>`, + }, + }, + { + name: "Patterns for journald logs", + drain: New(DefaultConfig()), + inputFile: "testdata/journald.txt", + patterns: []string{ + "2024-05-07T11:59:43.484606Z INFO ExtHandler ExtHandler Downloading agent manifest", + "<_> INFO TelemetryEventsCollector ExtHandler Collected 2 events for extension: Microsoft.Azure.Extensions.CustomScript", + "E0507 11:59:41.375655 4736 kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.12.0,Command:[],Args:[-target=ruler -config.expand-env=true -config.file=/etc/mimir/mimir.yaml -distributor.remote-timeout=10s],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:memberlist,HostPort:0,ContainerPort:7946,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:JAEGER_AGENT_HOST,Value:alloy-otlp.alloy-otlp.svc.cluster.local.,ValueFrom:nil,},EnvVar{Name:JAEGER_TAGS,Value:namespace=ge-metrics-federation,cluster=dev-us-central-0,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_MANAGER_HOST_PORT,Value:http://alloy-otlp.alloy-otlp.svc.cluster.local.:5778/sampling,ValueFrom:nil,},EnvVar{Name:GOOGLE_APPLICATION_CREDENTIALS,Value:/var/secrets/google/credentials.json,ValueFrom:nil,},EnvVar{Name:AM_TOKEN,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:ruler-alertmanager-token,},Key:token,Optional:nil,},},},EnvVar{Name:JAEGER_REPORTER_MAX_QUEUE_SIZE,Value:1000,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:gcs-credentials,ReadOnly:false,MountPath:/var/secrets/google/,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:license,ReadOnly:false,MountPath:/license,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:runtime-config,ReadOnly:false,MountPath:/var/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:storage,ReadOnly:false,MountPath:/data,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:active-queries,ReadOnly:false,MountPath:/active-query-tracker,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-jtnbs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{1 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod gem-mimir-ruler-5f56f7846b-fgxdm_ge-metrics-federation(07c06e21-137b-4fdd-b7d3-703f0a567720): CreateContainerConfigError: secret \"ruler-alertmanager-token\" not found", + "\tts=2024-05-07T11:59:32.025687537Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg=\"request", + "time=\"2024-05-07T11:59:38.484586527Z\" level=error msg=\"Failed to delete exec process \\\"d9e0a1867ce73695ad859f2b0a76fe8f5053db8a5e49142d747e53a445729bd4\\\" for container \\\"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\\\"\" error=\"ttrpc: closed: unknown\"", + "I0507 <_> <_> prober.go:107] \"Probe failed\" probeType=\"Readiness\" <_> <_> <_> probeResult=\"failure\" output=\"HTTP probe failed with statuscode: <_>", + "net_ratelimit: 2 callbacks suppressed", + "kauditd_printk_skb: <_> callbacks suppressed", + "Started cri-containerd-95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9.scope.", + "Removed slice libcontainer container kubepods-burstable-pod25cb986c_3d6c_4ed0_abf3_ee59ed6175f9.slice.", + "E0507 11:59:34.923938 3027 kuberuntime_manager.go:1261] container 
&Container{Name:mysqld-exporter,Image:prom/mysqld-exporter:v0.13.0,Command:[],Args:[--collect.info_schema.innodb_metrics],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:MYSQL_USER,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:username,Optional:nil,},},},EnvVar{Name:MYSQL_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:password,Optional:nil,},},},EnvVar{Name:MYSQL_HOST,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:endpoint,Optional:nil,},},},EnvVar{Name:MYSQL_PORT,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:port,Optional:nil,},},},EnvVar{Name:MYSQL_TLS_MODE,Value:preferred,ValueFrom:nil,},EnvVar{Name:DATA_SOURCE_NAME,Value:$(MYSQL_USER):$(MYSQL_PASSWORD)@tcp($(MYSQL_HOST):$(MYSQL_PORT))/?tls=$(MYSQL_TLS_MODE),ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dzx7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod testcrossplane-exporter-c67cfc58f-vbzl4_crossplane-playground(3d49134d-3378-4ec3-824c-5ff4ea2590a5): CreateContainerConfigError: secret \"testcrossplane-user-exporter\" not found", + "I0507 <_> 3224 <_> <_> <_> for volume <_> (UniqueName: <_> <_> <_> <_> <_> <_>", + "E0507 <_> <_> kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.11.1,Command:[],Args:[-target=ruler -config.expand-env=true <_> {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} <_> 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> CreateContainerConfigError: secret \"ruler-alertmanager-token\" not found", + "time=\"2024-05-07T11:59:34.707025668Z\" level=info 
msg=\"StopPodSandbox for \\\"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\\\" returns successfully\"", + "time=\"2024-05-07T11:59:34.706960850Z\" level=info msg=\"TearDown network for sandbox \\\"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\\\" successfully\"", + "time=\"2024-05-07T11:59:34.592084495Z\" level=info msg=\"Container to stop \\\"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\\\" must be in running or unknown state, current state \\\"CONTAINER_EXITED\\\"\"", + "time=\"2024-05-07T11:59:34.592005066Z\" level=info msg=\"StopPodSandbox for \\\"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\\\"\"", + "time=\"2024-05-07T11:59:34.591282703Z\" level=info msg=\"StopContainer for \\\"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\\\" returns successfully\"", + "time=\"2024-05-07T11:59:34.520032214Z\" level=info msg=\"Stop container \\\"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\\\" with signal terminated\"", + "time=\"2024-05-07T11:59:34.519591759Z\" level=info msg=\"StopContainer for \\\"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\\\" with timeout 30 (s)\"", + "E0507 <_> <_> pod_workers.go:1300] \"Error syncing pod, skipping\" err=\"failed to \\\"StartContainer\\\" for \\\"grafana\\\" with ErrImagePull: \\\"[rpc error: code = NotFound desc = failed to pull and unpack image <_> failed to resolve reference <_> <_> not found, failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden]\\\"\" <_> <_>", + "\t\t\t\t\t\twhile [ \"$(pidof plugins-pause)\" = \"\" ]; do sleep 0.5; done;", + "\t\t\t\t\t\tln --force -s /proc/$(pidof hgrun-pause)/root/bin/hgrun /bin/hgrun;", + "\t\t\t\t\t\texec /bin/hgrun -log.level=debug launch -bundledPluginsManifest /proc/$(pidof plugins-pause)/root/manifest.json -bundledPluginsDir /proc/$(pidof <_> -profile-port=6060 <_> {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{26 -3} {} 26m DecimalSI},memory: {{293601280 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/api/health,Port:{0 80 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:10,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/bin/hgrun check],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/hgrun drain -timeout 1m0s -waitTime 55s],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[SYS_PTRACE],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> ErrImagePull: [rpc error: code = NotFound desc = failed to pull and unpack image <_> 
failed to resolve reference <_> <_> not found, failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden]", + "<_> level=error msg=\"PullImage <_> failed\" error=\"failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden\"", + "<_> level=error msg=\"PullImage <_> failed\" error=\"rpc error: code = NotFound desc = failed to pull and unpack image <_> failed to resolve reference <_> <_> not found\"", + "<_> level=info msg=\"trying next host - response was http.StatusNotFound\" host=us.gcr.io", + "I0507 11:59:34.518822 3224 kuberuntime_container.go:745] \"Killing container with a grace period\" pod=\"hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4\" podUID=\"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" containerName=\"hgapi\" containerID=\"containerd://c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" gracePeriod=30", + "E0507 <_> <_> prober.go:239] \"Unable to write all bytes from execInContainer\" err=\"short write\" <_> actualBytes=10240", + "I0507 11:59:33.422254 1537502 kubelet_getters.go:187] \"Pod status updated\" pod=\"kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x28r\" status=\"Running\"", + "<_> level=info msg=\"RemoveContainer for <_> returns successfully\"", + "<_> level=info msg=\"RemoveContainer for <_>", + "E0507 <_> <_> prober.go:104] \"Probe errored\" err=\"rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found\" probeType=\"Readiness\" <_> <_> containerName=\"grafana\"", + "<_> level=error msg=\"ExecSync for <_> failed\" error=\"rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found\"", + " >", + "E0507 <_> <_> remote_image.go:180] \"PullImage from image service failed\" err=\"rpc error: code = Unknown desc = failed to pull and unpack image <_> failed to resolve reference <_> unexpected status from HEAD request to <_> 403 Forbidden\" <_>", + "E0507 <_> <_> remote_runtime.go:496] \"ExecSync cmd from runtime service failed\" err=\"rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task <_> not found: not found\" <_> cmd=[\"/bin/hgrun\",\"check\"]", + "<_> level=error caller=http_client.go:56 app=hgrun <_> msg=\"request failed\" error=\"Get \\\"http://127.0.0.1:3000/api/health\\\": dial tcp 127.0.0.1:3000: connect: connection refused\" method=GET url=http://127.0.0.1:3000/api/health", + "<_> level=warning msg=\"cleaning up after shim disconnected\" <_> namespace=k8s.io", + "<_> level=info msg=\"cleaning up dead shim\" namespace=k8s.io", + "<_> level=info msg=\"shim disconnected\" <_> namespace=k8s.io", + "I0507 11:59:32.409568 581823 cache.go:40] re-using cached key and certificate", + "I0507 <_> <_> <_> <_> (PLEG): <_> <_> <_> <_> <_>", + "<_> level=info msg=\"StartContainer for <_> returns successfully\"", + "audit: type=1400 <_> apparmor=\"DENIED\" operation=\"ptrace\" profile=\"cri-containerd.apparmor.d\" <_> comm=\"pidof\" requested_mask=\"read\" denied_mask=\"read\" peer=\"unconfined\"", + "AVC apparmor=\"DENIED\" operation=\"ptrace\" profile=\"cri-containerd.apparmor.d\" <_> comm=\"pidof\" requested_mask=\"read\" denied_mask=\"read\" peer=\"unconfined\"", + "Started libcontainer container <_>", + "<_> level=info msg=\"StartContainer for <_>", + "<_> level=info msg=\"CreateContainer 
within sandbox <_> for <_> returns container id <_>", + "<_> level=info msg=\"CreateContainer within sandbox <_> for container <_>", + "<_> level=info msg=\"PullImage <_>", + "<_> level=info msg=\"PullImage <_> returns image reference <_>", + "<_> level=info msg=\"Pulled image <_> with image id <_> repo tag <_> repo digest <_> size <_> in <_>", + "<_> level=info msg=\"ImageUpdate event <_> labels:{key:\\\"io.cri-containerd.image\\\" value:\\\"managed\\\"}\"", + "<_> level=info msg=\"stop pulling image <_> active requests=0, bytes <_>", + "<_> level=info msg=\"ImageCreate event <_> labels:{key:\\\"io.cri-containerd.image\\\" value:\\\"managed\\\"}\"", + "E0507 <_> <_> kuberuntime_manager.go:1256] container <_> set -e; while [ \"$(pidof hgrun-pause)\" = \"\" ]; do sleep 0.5; done;", + "I0507 <_> 6247 prober.go:107] \"Probe failed\" probeType=\"Readiness\" pod=\"grafana-agent/grafana-agent-helm-4\" podUID=\"c36c5200-1cd6-4093-893c-c022f91af996\" containerName=\"grafana-agent\" probeResult=\"failure\" output=\"Get \\\"http://10.0.99.125:3090/-/ready\\\": dial tcp 10.0.99.125:3090: connect: connection refused\"", + "<_> Consumed <_> CPU time.", + "<_> Deactivated successfully.", + "RCV: Reply message on eth0 from fe80::e9:7eff:fedf:3d37.", + "XMT: Renew on eth0, interval 9700ms.", + "PRC: Renewing lease on eth0.", + "I0507 <_> <_> prober.go:107] \"Probe failed\" probeType=\"Readiness\" <_> <_> containerName=\"grafana\" probeResult=\"failure\" output=<", + "I0507 <_> 2791 azure_credentials.go:220] <_> is not from ACR, return empty authentication", + "I0507 <_> <_> <_> \"Cleaned up orphaned pod volumes dir\" <_> <_>", + "XMT: Solicit on eth0, interval <_>", + "I0507 <_> <_> cache.go:40] re-using cached key and certificate", + "ll header: 00000000: 42 01 0a 80 00 <_> 42 01 0a 80 00 01 08 00", + "IPv4: martian source <_> from <_> on dev eth0", + "I0507 11:59:29.320184 1537502 kubelet_pods.go:906] \"Unable to retrieve pull secret, the image pull may not succeed.\" pod=\"logs-endpoint-dev-005/kafka-controller-0\" secret=\"\" err=\"secret \\\"not-needed\\\" not found\"", + "E0507 <_> <_> kuberuntime_manager.go:1256] container &Container{Name:pdc,Image:us.gcr.io/hosted-grafana/pdc:0.1.415,Command:[],Args:[-proxy.auth.ca-keys-dir=/var/run/secrets/pdc-certs -proxy.socks-server.addr=:10443 -proxy.ssh-server.addr=:2222 -proxy.use-socks-username-for-routing -proxy.api.http-address=:9182 -proxy.check-connpool-address-in-ring -memberlist.join=dns+gossip-ring.pdc.svc.cluster.local:7946 -api.http-address=:11443 -distributor.enabled=true -distributor.addr=:10444 -distributor.use-socks-username-for-routing -gateway.enabled=true -gateway.addr=:2244 -log.level=debug -certs.ca-private-key-file=/var/run/secrets/pdc-certs/ca.key -certs.ca-cert-file=/var/run/secrets/pdc-certs/ca.crt -certs.ca-pub-file=/var/run/secrets/pdc-certs/ca.pub -certs.cluster=local-k8s -shard-size=3 -graceful-shutdown-period=30s 
-enable-multiple-networks],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:socks,HostPort:0,ContainerPort:10443,Protocol:TCP,HostIP:,},ContainerPort{Name:ssh,HostPort:0,ContainerPort:2222,Protocol:TCP,HostIP:,},ContainerPort{Name:distributor,HostPort:0,ContainerPort:10444,Protocol:TCP,HostIP:,},ContainerPort{Name:gateway,HostPort:0,ContainerPort:2244,Protocol:TCP,HostIP:,},ContainerPort{Name:api,HostPort:0,ContainerPort:11443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{250 -3} {} 250m DecimalSI},memory: {{67108864 0} {} <_> 11443 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:40,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/sleep 5],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Never,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod <_> ErrImageNeverPull: Container image \"us.gcr.io/hosted-grafana/pdc:0.1.415\" is not present with pull policy of Never", + "I0507 <_> <_> kubelet_pods.go:906] \"Unable to retrieve pull secret, the image pull may not succeed.\" <_> secret=\"\" err=\"secret <_> not found\"", + "I0507 <_> 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume <_> (OuterVolumeSpecName: <_> pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\"). 
InnerVolumeSpecName <_> PluginName <_> VolumeGidValue \"\"", + "E0507 <_> <_> pod_workers.go:1300] \"Error syncing pod, skipping\" <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> <_>", + "E0507 <_> <_> pod_workers.go:1300] \"Error syncing pod, skipping\" err=\"failed to \\\"StartContainer\\\" for <_> with CrashLoopBackOff: \\\"back-off <_> restarting failed <_> <_> <_> <_>", + "I0507 <_> <_> <_> <_> <_> <_> <_> <_>", + "I0507 <_> <_> <_> \"SyncLoop <_> source=\"api\" <_>", + "<_> level=error msg=\"ContainerStatus for <_> failed\" error=\"rpc error: code = NotFound desc = an error occurred when try to find container <_> not found\"", + "I0507 <_> <_> scope.go:117] \"RemoveContainer\" <_>", + "E0507 <_> <_> remote_image.go:180] \"PullImage from image service failed\" err=\"rpc error: code = NotFound desc = failed to pull and unpack image <_> failed to resolve reference <_> <_> not found\" <_>", + "I0507 <_> <_> pod_container_deletor.go:53] \"DeleteContainer returned error\" <_> err=\"failed to get container status <_> rpc error: code = NotFound desc = an error occurred when try to find container <_> not found\"", + "E0507 <_> <_> pod_workers.go:1300] \"Error syncing pod, skipping\" err=\"failed to \\\"StartContainer\\\" for \\\"pdc\\\" with ErrImageNeverPull: \\\"Container image \\\\\\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\\\\\" is not present with pull policy of Never\\\"\" <_> <_>", + "E0507 <_> <_> remote_runtime.go:432] \"ContainerStatus from runtime service failed\" err=\"rpc error: code = NotFound desc = an error occurred when try to find container <_> not found\" <_>", + }, + }, + { + name: "Patterns for kafka logs", + drain: New(DefaultConfig()), + inputFile: "testdata/kafka.txt", + patterns: []string{ + `[2024-05-07 <_> INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment files <_> size=948, <_> <_> (kafka.log.LocalLog$)`, + `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach: <_> size=948, <_> <_> size=948, <_> <_> (kafka.log.UnifiedLog)`, + `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Incremented log start offset to <_> due to leader offset increment (kafka.log.UnifiedLog)`, + `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Deleting segments due to log start offset <_> breach: <_> <_> <_> <_> (kafka.log.UnifiedLog)`, + `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Incremented log start offset to <_> due to segment deletion (kafka.log.UnifiedLog)`, + `[2024-05-07 <_> INFO [UnifiedLog <_> dir=/bitnami/kafka/data] Deleting segment <_> <_> <_> <_> due to retention size <_> breach. Log size after deletion will be <_> (kafka.log.UnifiedLog)`, + `[2024-05-07 <_> INFO [ProducerStateManager <_> Wrote producer snapshot at offset <_> with 0 producer ids in <_> ms. (kafka.log.ProducerStateManager)`, + `[2024-05-07 10:55:53,038] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-1, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=447957, size=948, lastModifiedTime=1715059232052, largestRecordTimestamp=Some(1715059232002)),LogSegment(baseOffset=447969, size=948, lastModifiedTime=1715059424352, largestRecordTimestamp=Some(1715059424301)) (kafka.log.LocalLog$)`, + `[2024-05-07 <_> INFO [LocalLog <_> dir=/bitnami/kafka/data] Rolled new log segment at offset <_> in <_> ms. 
(kafka.log.LocalLog)`, + `[2024-05-07 10:55:40,638] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180400817, size=16997594, lastModifiedTime=1715075775780, largestRecordTimestamp=Some(1715075775771)),LogSegment(baseOffset=180403261, size=16992344, lastModifiedTime=1715075781053, largestRecordTimestamp=Some(1715075781021)),LogSegment(baseOffset=180405723, size=16989895, lastModifiedTime=1715075786205, largestRecordTimestamp=Some(1715075786174)),LogSegment(baseOffset=180408118, size=16998698, lastModifiedTime=1715075791681, largestRecordTimestamp=Some(1715075791673)),LogSegment(baseOffset=180410608, size=16995676, lastModifiedTime=1715075796438, largestRecordTimestamp=Some(1715075796430)),LogSegment(baseOffset=180412733, size=16963278, lastModifiedTime=1715075800534, largestRecordTimestamp=Some(1715075800511)),LogSegment(baseOffset=180414883, size=16984328, lastModifiedTime=1715075805272, largestRecordTimestamp=Some(1715075805230)),LogSegment(baseOffset=180417063, size=16989109, lastModifiedTime=1715075810381, largestRecordTimestamp=Some(1715075810372)),LogSegment(baseOffset=180419267, size=16996871, lastModifiedTime=1715075815153, largestRecordTimestamp=Some(1715075815125)),LogSegment(baseOffset=180421560, size=16988558, lastModifiedTime=1715075819785, largestRecordTimestamp=Some(1715075819763)),LogSegment(baseOffset=180424008, size=16999292, lastModifiedTime=1715075825336, largestRecordTimestamp=Some(1715075825303)),LogSegment(baseOffset=180426459, size=16990595, lastModifiedTime=1715075830839, largestRecordTimestamp=Some(1715075830827)),LogSegment(baseOffset=180428944, size=16995859, lastModifiedTime=1715075835942, largestRecordTimestamp=Some(1715075835904)),LogSegment(baseOffset=180431327, size=16992294, lastModifiedTime=1715075841219, largestRecordTimestamp=Some(1715075841214)),LogSegment(baseOffset=180433867, size=16966736, lastModifiedTime=1715075846443, largestRecordTimestamp=Some(1715075846401)),LogSegment(baseOffset=180436204, size=16894731, lastModifiedTime=1715075853273, largestRecordTimestamp=Some(1715075853244)),LogSegment(baseOffset=180438984, size=16983529, lastModifiedTime=1715075858911, largestRecordTimestamp=Some(1715075858891)),LogSegment(baseOffset=180441466, size=16996933, lastModifiedTime=1715075863566, largestRecordTimestamp=Some(1715075863554)),LogSegment(baseOffset=180443778, size=16999841, lastModifiedTime=1715075866199, largestRecordTimestamp=Some(1715075866185)),LogSegment(baseOffset=180445367, size=16992471, lastModifiedTime=1715075870385, largestRecordTimestamp=Some(1715075870347)),LogSegment(baseOffset=180447366, size=16999996, lastModifiedTime=1715075875102, largestRecordTimestamp=Some(1715075875091)),LogSegment(baseOffset=180449601, size=16994426, lastModifiedTime=1715075879927, largestRecordTimestamp=Some(1715075879926)),LogSegment(baseOffset=180452079, size=16998020, lastModifiedTime=1715075885293, largestRecordTimestamp=Some(1715075885263)),LogSegment(baseOffset=180454546, size=16992231, lastModifiedTime=1715075890424, largestRecordTimestamp=Some(1715075890409)),LogSegment(baseOffset=180456986, size=16970315, lastModifiedTime=1715075895719, largestRecordTimestamp=Some(1715075895690)),LogSegment(baseOffset=180459366, size=16990785, lastModifiedTime=1715075900996, largestRecordTimestamp=Some(1715075900985)),LogSegment(baseOffset=180461885, size=16996655, lastModifiedTime=1715075905847, largestRecordTimestamp=Some(1715075905841)),LogSegment(baseOffset=180464299, size=16982181, 
lastModifiedTime=1715075911052, largestRecordTimestamp=Some(1715075911028)),LogSegment(baseOffset=180466821, size=16997630, lastModifiedTime=1715075915962, largestRecordTimestamp=Some(1715075915953)),LogSegment(baseOffset=180468968, size=16995723, lastModifiedTime=1715075920325, largestRecordTimestamp=Some(1715075920308)),LogSegment(baseOffset=180471046, size=16979316, lastModifiedTime=1715075924724, largestRecordTimestamp=Some(1715075924697)),LogSegment(baseOffset=180473259, size=16995238, lastModifiedTime=1715075929645, largestRecordTimestamp=Some(1715075929624)),LogSegment(baseOffset=180475486, size=16988461, lastModifiedTime=1715075934288, largestRecordTimestamp=Some(1715075934283)),LogSegment(baseOffset=180477735, size=16993767, lastModifiedTime=1715075939277, largestRecordTimestamp=Some(1715075939270)),LogSegment(baseOffset=180480095, size=16995409, lastModifiedTime=1715075944639, largestRecordTimestamp=Some(1715075944635)),LogSegment(baseOffset=180482560, size=16992784, lastModifiedTime=1715075949760, largestRecordTimestamp=Some(1715075949760)),LogSegment(baseOffset=180484967, size=16990838, lastModifiedTime=1715075954937, largestRecordTimestamp=Some(1715075954929)),LogSegment(baseOffset=180487377, size=16976794, lastModifiedTime=1715075960151, largestRecordTimestamp=Some(1715075960119)),LogSegment(baseOffset=180489919, size=16997379, lastModifiedTime=1715075965116, largestRecordTimestamp=Some(1715075965085)),LogSegment(baseOffset=180492304, size=16956613, lastModifiedTime=1715075970448, largestRecordTimestamp=Some(1715075970424)),LogSegment(baseOffset=180494832, size=16895640, lastModifiedTime=1715075975354, largestRecordTimestamp=Some(1715075975341)),LogSegment(baseOffset=180496930, size=16998328, lastModifiedTime=1715075979813, largestRecordTimestamp=Some(1715075979796)),LogSegment(baseOffset=180499079, size=16995699, lastModifiedTime=1715075984309, largestRecordTimestamp=Some(1715075984285)),LogSegment(baseOffset=180501183, size=16993785, lastModifiedTime=1715075989086, largestRecordTimestamp=Some(1715075989064)),LogSegment(baseOffset=180503431, size=16989600, lastModifiedTime=1715075993713, largestRecordTimestamp=Some(1715075993683)),LogSegment(baseOffset=180505674, size=16984790, lastModifiedTime=1715075998337, largestRecordTimestamp=Some(1715075998318)),LogSegment(baseOffset=180508022, size=16982630, lastModifiedTime=1715076003671, largestRecordTimestamp=Some(1715076003660)),LogSegment(baseOffset=180510439, size=16999488, lastModifiedTime=1715076009000, largestRecordTimestamp=Some(1715076008996)),LogSegment(baseOffset=180512848, size=16997845, lastModifiedTime=1715076014033, largestRecordTimestamp=Some(1715076014032)),LogSegment(baseOffset=180515281, size=16990661, lastModifiedTime=1715076019245, largestRecordTimestamp=Some(1715076019216)),LogSegment(baseOffset=180517815, size=16996244, lastModifiedTime=1715076023989, largestRecordTimestamp=Some(1715076023963)),LogSegment(baseOffset=180520112, size=16992012, lastModifiedTime=1715076029243, largestRecordTimestamp=Some(1715076029231)) (kafka.log.LocalLog$)`, + `[2024-05-07 10:55:40,626] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180391157, size=16991045, lastModifiedTime=1715075754780, largestRecordTimestamp=Some(1715075754774)),LogSegment(baseOffset=180393429, size=16997692, lastModifiedTime=1715075760206, largestRecordTimestamp=Some(1715075760186)),LogSegment(baseOffset=180395889, size=16998200, lastModifiedTime=1715075765542, 
largestRecordTimestamp=Some(1715075765526)),LogSegment(baseOffset=180398373, size=16977347, lastModifiedTime=1715075770515, largestRecordTimestamp=Some(1715075770504)) (kafka.log.LocalLog$)`, + `[2024-05-07 10:55:40,559] INFO [LocalLog partition=ingest-7, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=179133378, size=16987985, lastModifiedTime=1715075760072, largestRecordTimestamp=Some(1715075760047)),LogSegment(baseOffset=179135832, size=16999459, lastModifiedTime=1715075765431, largestRecordTimestamp=Some(1715075765398)),LogSegment(baseOffset=179138321, size=16994485, lastModifiedTime=1715075770425, largestRecordTimestamp=Some(1715075770404)),LogSegment(baseOffset=179140761, size=16996810, lastModifiedTime=1715075775622, largestRecordTimestamp=Some(1715075775619)),LogSegment(baseOffset=179143198, size=16998520, lastModifiedTime=1715075780912, largestRecordTimestamp=Some(1715075780889)),LogSegment(baseOffset=179145674, size=16988474, lastModifiedTime=1715075786051, largestRecordTimestamp=Some(1715075786030)),LogSegment(baseOffset=179148084, size=16956099, lastModifiedTime=1715075791514, largestRecordTimestamp=Some(1715075791486)),LogSegment(baseOffset=179150568, size=16995476, lastModifiedTime=1715075796360, largestRecordTimestamp=Some(1715075796329)),LogSegment(baseOffset=179152727, size=16993313, lastModifiedTime=1715075800440, largestRecordTimestamp=Some(1715075800430)),LogSegment(baseOffset=179154861, size=16992142, lastModifiedTime=1715075805147, largestRecordTimestamp=Some(1715075805135)),LogSegment(baseOffset=179157056, size=16999919, lastModifiedTime=1715075810155, largestRecordTimestamp=Some(1715075810153)),LogSegment(baseOffset=179159230, size=16995021, lastModifiedTime=1715075815018, largestRecordTimestamp=Some(1715075815016)),LogSegment(baseOffset=179161550, size=16966526, lastModifiedTime=1715075819528, largestRecordTimestamp=Some(1715075819521)),LogSegment(baseOffset=179163962, size=16990848, lastModifiedTime=1715075825066, largestRecordTimestamp=Some(1715075825042)),LogSegment(baseOffset=179166414, size=16997833, lastModifiedTime=1715075830662, largestRecordTimestamp=Some(1715075830656)),LogSegment(baseOffset=179168915, size=16992619, lastModifiedTime=1715075835771, largestRecordTimestamp=Some(1715075835741)),LogSegment(baseOffset=179171302, size=16999091, lastModifiedTime=1715075841031, largestRecordTimestamp=Some(1715075841022)),LogSegment(baseOffset=179173853, size=16993953, lastModifiedTime=1715075846197, largestRecordTimestamp=Some(1715075846181)),LogSegment(baseOffset=179176191, size=16997479, lastModifiedTime=1715075853192, largestRecordTimestamp=Some(1715075853172)),LogSegment(baseOffset=179179037, size=16997174, lastModifiedTime=1715075858693, largestRecordTimestamp=Some(1715075858682)),LogSegment(baseOffset=179181478, size=16986004, lastModifiedTime=1715075863400, largestRecordTimestamp=Some(1715075863396)),LogSegment(baseOffset=179183786, size=16995316, lastModifiedTime=1715075866123, largestRecordTimestamp=Some(1715075866112)),LogSegment(baseOffset=179185434, size=16990492, lastModifiedTime=1715075870154, largestRecordTimestamp=Some(1715075870146)),LogSegment(baseOffset=179187398, size=16999541, lastModifiedTime=1715075874980, largestRecordTimestamp=Some(1715075874961)),LogSegment(baseOffset=179189664, size=16987383, lastModifiedTime=1715075879670, largestRecordTimestamp=Some(1715075879639)),LogSegment(baseOffset=179192076, size=16991701, lastModifiedTime=1715075885010, 
largestRecordTimestamp=Some(1715075884995)),LogSegment(baseOffset=179194546, size=16989109, lastModifiedTime=1715075890220, largestRecordTimestamp=Some(1715075890208)),LogSegment(baseOffset=179197009, size=16962782, lastModifiedTime=1715075895466, largestRecordTimestamp=Some(1715075895456)),LogSegment(baseOffset=179199373, size=16974715, lastModifiedTime=1715075900757, largestRecordTimestamp=Some(1715075900746)),LogSegment(baseOffset=179201897, size=16993973, lastModifiedTime=1715075905639, largestRecordTimestamp=Some(1715075905638)),LogSegment(baseOffset=179204346, size=16979828, lastModifiedTime=1715075910798, largestRecordTimestamp=Some(1715075910782)),LogSegment(baseOffset=179206836, size=16992092, lastModifiedTime=1715075915638, largestRecordTimestamp=Some(1715075915632)),LogSegment(baseOffset=179208986, size=16988849, lastModifiedTime=1715075920193, largestRecordTimestamp=Some(1715075920176)),LogSegment(baseOffset=179211133, size=16989206, lastModifiedTime=1715075924352, largestRecordTimestamp=Some(1715075924338)),LogSegment(baseOffset=179213268, size=16989737, lastModifiedTime=1715075929343, largestRecordTimestamp=Some(1715075929332)),LogSegment(baseOffset=179215514, size=16997903, lastModifiedTime=1715075934074, largestRecordTimestamp=Some(1715075934056)),LogSegment(baseOffset=179217793, size=16995100, lastModifiedTime=1715075938937, largestRecordTimestamp=Some(1715075938925)),LogSegment(baseOffset=179220122, size=16981574, lastModifiedTime=1715075944296, largestRecordTimestamp=Some(1715075944288)),LogSegment(baseOffset=179222600, size=16999794, lastModifiedTime=1715075949454, largestRecordTimestamp=Some(1715075949432)),LogSegment(baseOffset=179224988, size=16998870, lastModifiedTime=1715075954567, largestRecordTimestamp=Some(1715075954544)),LogSegment(baseOffset=179227402, size=16986053, lastModifiedTime=1715075959815, largestRecordTimestamp=Some(1715075959813)),LogSegment(baseOffset=179229948, size=16999937, lastModifiedTime=1715075964787, largestRecordTimestamp=Some(1715075964779)),LogSegment(baseOffset=179232368, size=16992995, lastModifiedTime=1715075970109, largestRecordTimestamp=Some(1715075970096)),LogSegment(baseOffset=179234885, size=16995271, lastModifiedTime=1715075975078, largestRecordTimestamp=Some(1715075975066)),LogSegment(baseOffset=179237038, size=16987833, lastModifiedTime=1715075979534, largestRecordTimestamp=Some(1715075979499)),LogSegment(baseOffset=179239147, size=16844618, lastModifiedTime=1715075984150, largestRecordTimestamp=Some(1715075984139)),LogSegment(baseOffset=179241334, size=16968482, lastModifiedTime=1715075988727, largestRecordTimestamp=Some(1715075988700)),LogSegment(baseOffset=179243472, size=16991395, lastModifiedTime=1715075993359, largestRecordTimestamp=Some(1715075993333)),LogSegment(baseOffset=179245756, size=16985926, lastModifiedTime=1715075998010, largestRecordTimestamp=Some(1715075998005)),LogSegment(baseOffset=179248096, size=16948574, lastModifiedTime=1715076003328, largestRecordTimestamp=Some(1715076003298)),LogSegment(baseOffset=179250530, size=16986047, lastModifiedTime=1715076008650, largestRecordTimestamp=Some(1715076008628)),LogSegment(baseOffset=179252915, size=16998875, lastModifiedTime=1715076013551, largestRecordTimestamp=Some(1715076013516)),LogSegment(baseOffset=179255312, size=16997990, lastModifiedTime=1715076018832, largestRecordTimestamp=Some(1715076018797)),LogSegment(baseOffset=179257861, size=16999525, lastModifiedTime=1715076023621, largestRecordTimestamp=Some(1715076023601)),LogSegment(baseOffset=179260226, 
size=16997755, lastModifiedTime=1715076028814, largestRecordTimestamp=Some(1715076028800)),LogSegment(baseOffset=179262715, size=16981492, lastModifiedTime=1715076034150, largestRecordTimestamp=Some(1715076034140)),LogSegment(baseOffset=179265040, size=16998332, lastModifiedTime=1715076038676, largestRecordTimestamp=Some(1715076038657)) (kafka.log.LocalLog$)`, + `[2024-05-07 10:55:40,713] INFO [LocalLog partition=ingest-3, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=182526165, size=16998661, lastModifiedTime=1715075758062, largestRecordTimestamp=Some(1715075758061)),LogSegment(baseOffset=182528560, size=16999718, lastModifiedTime=1715075763583, largestRecordTimestamp=Some(1715075763577)),LogSegment(baseOffset=182531056, size=16994792, lastModifiedTime=1715075768711, largestRecordTimestamp=Some(1715075768697)),LogSegment(baseOffset=182533514, size=16987578, lastModifiedTime=1715075773552, largestRecordTimestamp=Some(1715075773536)),LogSegment(baseOffset=182535953, size=16987705, lastModifiedTime=1715075779055, largestRecordTimestamp=Some(1715075779046)),LogSegment(baseOffset=182538482, size=16997466, lastModifiedTime=1715075784005, largestRecordTimestamp=Some(1715075784004)),LogSegment(baseOffset=182540856, size=16981250, lastModifiedTime=1715075789523, largestRecordTimestamp=Some(1715075789487)),LogSegment(baseOffset=182543386, size=16980484, lastModifiedTime=1715075794637, largestRecordTimestamp=Some(1715075794632)),LogSegment(baseOffset=182545622, size=16999738, lastModifiedTime=1715075799008, largestRecordTimestamp=Some(1715075799000)),LogSegment(baseOffset=182547827, size=16872695, lastModifiedTime=1715075803273, largestRecordTimestamp=Some(1715075803251)),LogSegment(baseOffset=182550001, size=16999890, lastModifiedTime=1715075808368, largestRecordTimestamp=Some(1715075808355)),LogSegment(baseOffset=182552113, size=16959982, lastModifiedTime=1715075813294, largestRecordTimestamp=Some(1715075813293)),LogSegment(baseOffset=182554415, size=16988073, lastModifiedTime=1715075817816, largestRecordTimestamp=Some(1715075817783)),LogSegment(baseOffset=182556814, size=16974731, lastModifiedTime=1715075823018, largestRecordTimestamp=Some(1715075823016)),LogSegment(baseOffset=182559282, size=16996090, lastModifiedTime=1715075828672, largestRecordTimestamp=Some(1715075828632)),LogSegment(baseOffset=182561708, size=16999327, lastModifiedTime=1715075833742, largestRecordTimestamp=Some(1715075833709)),LogSegment(baseOffset=182564173, size=16992947, lastModifiedTime=1715075839121, largestRecordTimestamp=Some(1715075839114)),LogSegment(baseOffset=182566740, size=16982572, lastModifiedTime=1715075844268, largestRecordTimestamp=Some(1715075844254)),LogSegment(baseOffset=182569086, size=16994786, lastModifiedTime=1715075850659, largestRecordTimestamp=Some(1715075850642)),LogSegment(baseOffset=182571815, size=16998391, lastModifiedTime=1715075856704, largestRecordTimestamp=Some(1715075856684)),LogSegment(baseOffset=182574372, size=16994403, lastModifiedTime=1715075861956, largestRecordTimestamp=Some(1715075861922)),LogSegment(baseOffset=182576828, size=16984546, lastModifiedTime=1715075865194, largestRecordTimestamp=Some(1715075865180)),LogSegment(baseOffset=182578716, size=16987846, lastModifiedTime=1715075868470, largestRecordTimestamp=Some(1715075868460)),LogSegment(baseOffset=182580437, size=16958237, lastModifiedTime=1715075873168, largestRecordTimestamp=Some(1715075873151)),LogSegment(baseOffset=182582637, size=16999432, lastModifiedTime=1715075877858, 
largestRecordTimestamp=Some(1715075877850)),LogSegment(baseOffset=182585006, size=16938567, lastModifiedTime=1715075882952, largestRecordTimestamp=Some(1715075882938)),LogSegment(baseOffset=182587493, size=16998214, lastModifiedTime=1715075888306, largestRecordTimestamp=Some(1715075888285)),LogSegment(baseOffset=182589965, size=16996264, lastModifiedTime=1715075893370, largestRecordTimestamp=Some(1715075893365)),LogSegment(baseOffset=182592327, size=16991650, lastModifiedTime=1715075898806, largestRecordTimestamp=Some(1715075898802)),LogSegment(baseOffset=182594863, size=16998234, lastModifiedTime=1715075903737, largestRecordTimestamp=Some(1715075903733)),LogSegment(baseOffset=182597289, size=16996241, lastModifiedTime=1715075908805, largestRecordTimestamp=Some(1715075908797)),LogSegment(baseOffset=182599811, size=16993657, lastModifiedTime=1715075913918, largestRecordTimestamp=Some(1715075913915)),LogSegment(baseOffset=182602171, size=16993112, lastModifiedTime=1715075918570, largestRecordTimestamp=Some(1715075918570)),LogSegment(baseOffset=182604245, size=16959963, lastModifiedTime=1715075922720, largestRecordTimestamp=Some(1715075922714)),LogSegment(baseOffset=182606451, size=16998518, lastModifiedTime=1715075927490, largestRecordTimestamp=Some(1715075927484)),LogSegment(baseOffset=182608616, size=16999103, lastModifiedTime=1715075932207, largestRecordTimestamp=Some(1715075932188)),LogSegment(baseOffset=182610888, size=16999389, lastModifiedTime=1715075937118, largestRecordTimestamp=Some(1715075937103)),LogSegment(baseOffset=182613221, size=16982597, lastModifiedTime=1715075942170, largestRecordTimestamp=Some(1715075942153)),LogSegment(baseOffset=182615634, size=16986904, lastModifiedTime=1715075947544, largestRecordTimestamp=Some(1715075947541)),LogSegment(baseOffset=182618074, size=16998820, lastModifiedTime=1715075952370, largestRecordTimestamp=Some(1715075952351)),LogSegment(baseOffset=182620446, size=16985066, lastModifiedTime=1715075957884, largestRecordTimestamp=Some(1715075957865)),LogSegment(baseOffset=182623007, size=16998235, lastModifiedTime=1715075963030, largestRecordTimestamp=Some(1715075963008)),LogSegment(baseOffset=182625520, size=16987568, lastModifiedTime=1715075967944, largestRecordTimestamp=Some(1715075967934)),LogSegment(baseOffset=182627921, size=16997118, lastModifiedTime=1715075973216, largestRecordTimestamp=Some(1715075973204)),LogSegment(baseOffset=182630290, size=16978465, lastModifiedTime=1715075978064, largestRecordTimestamp=Some(1715075978053)),LogSegment(baseOffset=182632463, size=16901644, lastModifiedTime=1715075982228, largestRecordTimestamp=Some(1715075982211)),LogSegment(baseOffset=182634546, size=16992477, lastModifiedTime=1715075986935, largestRecordTimestamp=Some(1715075986914)),LogSegment(baseOffset=182636738, size=16951087, lastModifiedTime=1715075991658, largestRecordTimestamp=Some(1715075991636)),LogSegment(baseOffset=182639001, size=16994471, lastModifiedTime=1715075996281, largestRecordTimestamp=Some(1715075996266)),LogSegment(baseOffset=182641298, size=16995754, lastModifiedTime=1715076001319, largestRecordTimestamp=Some(1715076001269)),LogSegment(baseOffset=182643712, size=16992752, lastModifiedTime=1715076006604, largestRecordTimestamp=Some(1715076006583)),LogSegment(baseOffset=182646095, size=16992944, lastModifiedTime=1715076011511, largestRecordTimestamp=Some(1715076011470)),LogSegment(baseOffset=182648504, size=16998993, lastModifiedTime=1715076016908, largestRecordTimestamp=Some(1715076016908)),LogSegment(baseOffset=182651018, 
size=16996765, lastModifiedTime=1715076021971, largestRecordTimestamp=Some(1715076021968)),LogSegment(baseOffset=182653526, size=16995808, lastModifiedTime=1715076026767, largestRecordTimestamp=Some(1715076026752)),LogSegment(baseOffset=182655860, size=16993535, lastModifiedTime=1715076032181, largestRecordTimestamp=Some(1715076032131)),LogSegment(baseOffset=182658341, size=16971926, lastModifiedTime=1715076037067, largestRecordTimestamp=Some(1715076037053)) (kafka.log.LocalLog$)`, + `[2024-05-07 <_> INFO Deleted producer state snapshot <_> (kafka.log.SnapshotFile)`, + `[2024-05-07 <_> INFO Deleted offset index <_> (kafka.log.LogSegment)`, + `[2024-05-07 <_> INFO Deleted log <_> (kafka.log.LogSegment)`, + `[2024-05-07 <_> INFO Deleted time index <_> (kafka.log.LogSegment)`, + }, + }, + { + name: "Patterns for kubernetes logs", + drain: New(DefaultConfig()), + inputFile: "testdata/kubernetes.txt", + patterns: []string{ + "I0507 12:04:17.596484 1 highnodeutilization.go:107] \"Criteria for a node below target utilization\" CPU=50 Mem=50 Pods=100", + "I0507 12:04:17.595169 1 descheduler.go:155] Building a pod evictor", + "I0507 <_> 1 <_> \"Number of <_> <_> <_>", + "I0507 <_> 1 <_> \"Total <_> <_> <_> <_> <_> <_> <_>", + "I0507 <_> 1 <_> <_> <_> <_> <_> <_> <_> <_> <_> <_>", + "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" pod:=\"loki-dev-005/querier-burst-6b5f6db455-5zvkm\" <_> error:=\"[insufficient <_> insufficient <_>", + "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" pod:=\"loki-dev-005/querier-burst-6b5f6db455-5zvkm\" <_> error:=\"pod node selector does not match the node label\"", + "I0507 <_> 1 <_> \"Pods on node\" <_> <_> <_> <_>", + "I0507 12:02:27.947830 1 nodeutilization.go:274] \"Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers\"", + "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"pod has local storage and descheduler is not configured with evictLocalStoragePods\"", + "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"pod is a DaemonSet pod\"", + "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]\"", + "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]\"", + "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]\"", + "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"[pod <_> <_> <_> <_> pod has higher priority than specified priority class threshold]\"", + "I0507 <_> 1 defaultevictor.go:163] \"pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable\" <_>", + "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, pod does not tolerate taints on the node]\"", + "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> 
checks=\"[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]\"", + "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, insufficient <_> insufficient <_> insufficient pods]\"", + "I0507 <_> 1 defaultevictor.go:202] \"Pod fails the following checks\" <_> checks=\"[pod <_> <_> <_> <_> pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]\"", + "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_> insufficient <_>", + "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"insufficient cpu\"", + "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient <_>", + "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, insufficient <_> insufficient <_>", + "I0507 <_> 1 node.go:157] \"Pod does not fit on any other node\" <_> <_> error:=\"[pod node selector does not match the node label, insufficient <_>", + "I0507 <_> 1 <_> <_> <_> <_> <_> <_> <_>", + }, + }, + { + name: "Patterns for vault logs", + drain: New(DefaultConfig()), + inputFile: "testdata/vault.txt", + patterns: []string{ + "<_> [INFO] expiration: revoked lease: <_>", + }, + }, + { + name: "Patterns for calico logs", + drain: New(DefaultConfig()), + inputFile: "testdata/calico.txt", + patterns: []string{ + `2024-05-08 <_> [DEBUG][216945] felix/table.go 870: Found forward-reference <_> ipVersion=0x4 <_> <_> [0:0]" table="nat"`, + `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="*nat" table="nat"`, + `2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881: Not an append, skipping ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat"`, + `2024-05-08 15:23:58.715 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat"`, + `2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 118: Parsed kernel version version=5.15.0-1057`, + `2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 110: Raw kernel version rawVersion="Linux version 5.15.0-1057-azure (buildd@lcy02-amd64-033) (gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0, GNU ld (GNU Binutils for Ubuntu) 2.38) #65-Ubuntu SMP Fri Feb 9 18:39:24 UTC 2024\n"`, + `2024-05-08 15:23:58.684 [DEBUG][216945] felix/feature_detect.go 242: Ran iptables --version rawVersion="iptables v1.8.4 (legacy)\n"`, + `2024-05-08 <_> [DEBUG][216945] felix/feature_detect.go <_> <_> <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][216945] felix/table.go <_> <_> <_> <_> <_> <_> <_> table="nat"`, + `2024-05-08 <_> [DEBUG][3576126] felix/int_dataplane.go <_> <_> <_> for MTU <_> <_> <_>`, + `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", ""} chainName="INPUT" expectedRuleIDs=[]string{"Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} 
ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "tVnHkvAo15HuiPy0", "", "", "", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "", "", "", "", "", "", "", "", "tVnHkvAo15HuiPy0", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="raw"`, + `2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "6gwbT8clXdHdC1b1"} chainName="PREROUTING" expectedRuleIDs=[]string{"6gwbT8clXdHdC1b1", "", "", "", ""} ipVersion=0x4 table="raw"`, + `2024-05-08 <_> <_> felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" <_>`, + `2024-05-08 15:23:58.604 [DEBUG][65] felix/ipsets.go <_> <_> <_> <_> <_> <_> <_>`, + `2024-05-08 <_> <_> felix/ipsets.go 366: Finished IPSets resync family="inet" numInconsistenciesFound=0 <_>`, + `2024-05-08 <_> <_> felix/ipsets.go 589: Whitelisting IP sets. <_> family="inet" <_>`, + `2024-05-08 <_> <_> felix/ipsets.go 467: Found member in dataplane <_> family="inet" <_> setID="this-host"`, + `2024-05-08 <_> <_> felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet"`, + `bird: Netlink: No route to host`, + `2024-05-08 <_> [DEBUG][76] felix/route_table.go 661: Syncing interface routes <_> <_> ipVersion=0x4 <_>`, + `2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 584: Flag no OIF for full re-sync`, + `2024-05-08 <_> [DEBUG][76] felix/route_table.go <_> <_> <_> <_> <_> <_> <_> ipVersion=0x4 <_>`, + `2024-05-08 15:23:56.617 [DEBUG][76] felix/wireguard.go 1503: Wireguard is disabled and does not exist ifaceName="wireguard.cali" ipVersion=0x4`, + `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 533: Check interfaces matching regex`, + `2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 654: Wireguard is not in-sync - verifying wireguard configuration is removed ipVersion=0x4`, + `2024-05-08 <_> <_> felix/ipsets.go 426: Parsing IP set. family="inet" <_>`, + `2024-05-08 <_> <_> felix/ipsets.go <_> <_> <_> <_> <_> family="inet"`, + `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_rule.go 179: Queueing a resync of routing rules. ipVersion=4`, + `2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 605: Queueing a resync of wireguard configuration ipVersion=0x4`, + `2024-05-08 <_> <_> felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4`, + `2024-05-08 <_> [DEBUG][76] felix/route_table.go 880: Processing route: 254 <_> <_> <_> ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0`, + `2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480: Queueing a resync of routing table. 
<_> ipVersion=0x4 <_>`, + `2024-05-08 <_> [DEBUG][216945] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][65] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, + `2024-05-08 15:23:58.035 [DEBUG][501368] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, + `2024-05-08 15:23:57.886 [DEBUG][3503680] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, + `2024-05-08 15:23:57.239 [DEBUG][732993] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][76] felix/xdp_state.go <_> <_> <_> <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][216945] felix/xdp_state.go <_> <_> <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][65] felix/xdp_state.go <_> <_> <_> <_> <_>`, + `2024-05-08 15:23:58.035 [DEBUG][501368] felix/xdp_state.go <_> <_> <_> <_> <_>`, + `2024-05-08 15:23:57.886 [DEBUG][3503680] felix/xdp_state.go <_> <_> <_> <_> <_>`, + `2024-05-08 15:23:57.239 [DEBUG][732993] felix/xdp_state.go <_> <_> <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][76] felix/xdp_state.go <_> <_> <_> <_> <_>`, + `2024-05-08 <_> <_> felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}}`, + `2024-05-08 <_> <_> felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4`, + `2024-05-08 <_> <_> felix/endpoint_mgr.go 443: Reporting endpoint status. dirtyEndpoints=set.Set{}`, + `2024-05-08 15:23:58.680 [DEBUG][216945] felix/int_dataplane.go <_> <_> <_> <_>`, + `2024-05-08 <_> <_> felix/int_dataplane.go 1807: Applying dataplane updates`, + `2024-05-08 15:23:56.614 [DEBUG][76] felix/int_dataplane.go 1777: Refreshing routes`, + `2024-05-08 <_> <_> felix/sync_client.go <_> <_> <_> <_> Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type=""`, + `2024-05-08 <_> <_> felix/sync_client.go 434: New message from Typha. 
connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, <_> <_> time.Local)}} type=""`, + `2024-05-08 <_> <_> felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"}`, + `2024-05-08 <_> <_> felix/health.go 196: Checking state of reporter <_> reports:health.HealthReport{Live:true, Ready:true, Detail:""}, <_> latest:health.HealthReport{Live:true, Ready:true, Detail:""}, <_> <_> loc:(*time.Location)(0x4ce3aa0)}}`, + `2024-05-08 <_> [DEBUG][501368] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][76] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][3576126] felix/health.go <_> <_> <_>`, + `2024-05-08 15:23:57.701 [DEBUG][216945] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][3583983] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][3596528] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][65] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][3383360] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][3435880] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][3794357] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][88347] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][990568] felix/health.go <_> <_> <_>`, + `2024-05-08 15:23:56.615 [DEBUG][2460733] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][3503680] felix/health.go <_> <_> <_>`, + `2024-05-08 <_> <_> felix/summary.go 100: Summarising <_> dataplane reconciliation loops over <_> <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][65] felix/int_dataplane.go <_> <_> <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][501368] felix/int_dataplane.go <_> <_> <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][3503680] felix/int_dataplane.go <_> <_> <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][732993] felix/int_dataplane.go <_> <_> <_> <_> <_>`, + `2024-05-08 <_> [DEBUG][216945] felix/int_dataplane.go <_> <_> <_> <_> <_>`, + `2024-05-08 <_> <_> felix/int_dataplane.go 2080: Asked to reschedule. <_>`, + `2024-05-08 15:23:58.684 [DEBUG][216945] felix/table.go 604: Loading current iptables state and checking it is correct. ipVersion=0x4 table="nat"`, + `2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> ipVersion=0x4 <_>`, + `2024-05-08 15:23:58.605 [DEBUG][65] felix/table.go <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> ipVersion=0x4 table="filter"`, + `2024-05-08 15:23:58.604 [DEBUG][65] felix/table.go <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> ipVersion=0x4 <_>`, + `2024-05-08 15:23:58.038 [DEBUG][501368] felix/table.go <_> <_> <_> <_> <_> <_> <_> <_> <_> <_> ipVersion=0x4 <_>`, + `2024-05-08 <_> <_> felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 <_>`, + `2024-05-08 <_> <_> felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. 
ipVersion=0x4 <_>`, + `2024-05-08 <_> [DEBUG][615489] felix/table.go <_> <_> <_> <_> <_> ipVersion=0x4 table="filter"`, }, }, } @@ -128,23 +383,27 @@ func TestDrain_TrainGeneratesMatchablePatterns(t *testing.T) { inputLines []string }{ { - name: "should match each line against a pattern", + name: `should match each line against a pattern`, drain: New(DefaultConfig()), inputLines: []string{ - `test test test`, - `test test test`, - `test test test`, - `test test test`, + "test test test", + "test test test", + "test test test", + "test test test", }, }, { - name: "should also match newlines", + name: `should also match newlines`, drain: New(DefaultConfig()), inputLines: []string{ - "test test test\n", - "test test test\n", - "test test test\n", - "test test test\n", + `test test test +`, + `test test test +`, + `test test test +`, + `test test test +`, }, }, } @@ -154,11 +413,11 @@ func TestDrain_TrainGeneratesMatchablePatterns(t *testing.T) { for _, line := range tt.inputLines { tt.drain.Train(line, 0) } - t.Log("Learned clusters", tt.drain.Clusters()) + t.Log(`Learned clusters`, tt.drain.Clusters()) for _, line := range tt.inputLines { match := tt.drain.Match(line) - require.NotNil(t, match, "Line should match a cluster") + require.NotNil(t, match, `Line should match a cluster`) } }) } @@ -173,43 +432,47 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) inputLines []string }{ { - name: "should extract patterns that all lines match", + name: `should extract patterns that all lines match`, drain: New(DefaultConfig()), inputLines: []string{ - `test 1 test`, - `test 2 test`, - `test 3 test`, - `test 4 test`, + "test 1 test", + "test 2 test", + "test 3 test", + "test 4 test", }, }, { - name: "should extract patterns that match if line ends with newlines", + name: `should extract patterns that match if line ends with newlines`, drain: New(DefaultConfig()), inputLines: []string{ - "test 1 test\n", - "test 2 test\n", - "test 3 test\n", - "test 4 test\n", + `test 1 test +`, + `test 2 test +`, + `test 3 test +`, + `test 4 test +`, }, }, { - name: "should extract patterns that match if line ends with empty space", + name: `should extract patterns that match if line ends with empty space`, drain: New(DefaultConfig()), inputLines: []string{ - "test 1 test ", - "test 2 test ", - "test 3 test ", - "test 4 test ", + `test 1 test `, + `test 2 test `, + `test 3 test `, + `test 4 test `, }, }, { - name: "should extract patterns that match if line starts with empty space", + name: `should extract patterns that match if line starts with empty space`, drain: New(DefaultConfig()), inputLines: []string{ - " test 1 test", - " test 2 test", - " test 3 test", - " test 4 test", + ` test 1 test`, + ` test 2 test`, + ` test 3 test`, + ` test 4 test`, }, }, } @@ -221,14 +484,14 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) } require.Equal(t, 1, len(tt.drain.Clusters())) cluster := tt.drain.Clusters()[0] - t.Log("Extracted cluster: ", cluster) + t.Log(`Extracted cluster: `, cluster) matcher, err := pattern.ParseLineFilter([]byte(cluster.String())) require.NoError(t, err) for _, line := range tt.inputLines { passes := matcher.Test([]byte(line)) - require.Truef(t, passes, "Line %q should match extracted pattern", line) + require.Truef(t, passes, `Line %q should match extracted pattern`, line) } }) } diff --git a/pkg/pattern/drain/testdata/agent-logfmt.txt b/pkg/pattern/drain/testdata/agent-logfmt.txt index 096c283193e3f..a36f7aee09d60 100644 --- 
a/pkg/pattern/drain/testdata/agent-logfmt.txt +++ b/pkg/pattern/drain/testdata/agent-logfmt.txt @@ -997,4 +997,4 @@ ts=2024-04-16T15:10:42.557068004Z level=info msg="finished node evaluation" cont ts=2024-04-16T15:10:42.55528547Z level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines node_id=prometheus.scrape.stack_378175_cloudwatch_notags duration=14.748545ms ts=2024-04-16T15:10:42.555273483Z level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines node_id=prometheus.scrape.stack_378175_cloudwatch_notags duration=14.735015ms ts=2024-04-16T15:10:42.555218448Z level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines node_id=prometheus.scrape.stack_378175_cloudwatch_notags duration=38.592855ms -2024-04-16 15:10:42.555 ts=2024-04-16T15:10:42.555230437Z level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines node_id=prometheus.scrape.stack_378175_cloudwatch_notags duration=38.545339ms \ No newline at end of file +ts=2024-04-16T15:10:42.555230437Z level=info msg="finished node evaluation" controller_id=module.http.cloudwatch_pipelines node_id=prometheus.scrape.stack_378175_cloudwatch_notags duration=38.545339ms \ No newline at end of file diff --git a/pkg/pattern/drain/testdata/calico.txt b/pkg/pattern/drain/testdata/calico.txt new file mode 100644 index 0000000000000..a589526c5e744 --- /dev/null +++ b/pkg/pattern/drain/testdata/calico.txt @@ -0,0 +1,1000 @@ +2024-05-08 15:23:58.902 [DEBUG][606918] felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:58.901 [DEBUG][606918] felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:58.901 [DEBUG][606918] felix/sync_client.go 434: New message from Typha. 
connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, 58, 899315286, time.Local)}} type="" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-P53KRBBAHF7EH6MF - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-527TDH7QDHLCYDTX" ipVersion=0x4 line=":KUBE-SEP-527TDH7QDHLCYDTX - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-527TDH7QDHLCYDTX - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-IOPUYNOJID4CYL5S" ipVersion=0x4 line=":KUBE-SEP-IOPUYNOJID4CYL5S - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-IOPUYNOJID4CYL5S - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-YADSGSG25SR3HQ6W" ipVersion=0x4 line=":KUBE-SEP-YADSGSG25SR3HQ6W - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-YADSGSG25SR3HQ6W - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-DZCXKX63Q3ZRE2XB" ipVersion=0x4 line=":KUBE-SEP-DZCXKX63Q3ZRE2XB - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-DZCXKX63Q3ZRE2XB - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-RK34UV6XMAMZC6JG" ipVersion=0x4 line=":KUBE-SEP-RK34UV6XMAMZC6JG - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-RK34UV6XMAMZC6JG - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-5KVHYONDUWXKZLCF" ipVersion=0x4 line=":KUBE-SEP-5KVHYONDUWXKZLCF - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-5KVHYONDUWXKZLCF - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-WKQFW72ZLNYTB4P7" ipVersion=0x4 line=":KUBE-SEP-WKQFW72ZLNYTB4P7 - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-WKQFW72ZLNYTB4P7 - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-VQOBWT5QN7AMFSUO" ipVersion=0x4 line=":KUBE-SEP-VQOBWT5QN7AMFSUO - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-VQOBWT5QN7AMFSUO - [0:0]" table="nat" +2024-05-08 15:23:58.735 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-TX2E3S6G3BZ6VCYU" ipVersion=0x4 line=":KUBE-SEP-TX2E3S6G3BZ6VCYU - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-TX2E3S6G3BZ6VCYU - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-E2GHKVOJHBYBPZ3C" ipVersion=0x4 line=":KUBE-SEP-E2GHKVOJHBYBPZ3C - [0:0]" table="nat" 
+2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-E2GHKVOJHBYBPZ3C - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-JLFGSS5Y56HOFTOX" ipVersion=0x4 line=":KUBE-SEP-JLFGSS5Y56HOFTOX - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-JLFGSS5Y56HOFTOX - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-YCN2JZZKB3DRPNC4" ipVersion=0x4 line=":KUBE-SEP-YCN2JZZKB3DRPNC4 - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-YCN2JZZKB3DRPNC4 - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-U4I77HBN3HVEYELA" ipVersion=0x4 line=":KUBE-SEP-U4I77HBN3HVEYELA - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-U4I77HBN3HVEYELA - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-TS6C4FBECULI2LCC" ipVersion=0x4 line=":KUBE-SVC-TS6C4FBECULI2LCC - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-TS6C4FBECULI2LCC - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-L5HKBC4ZNNL6TTAI" ipVersion=0x4 line=":KUBE-SEP-L5HKBC4ZNNL6TTAI - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-L5HKBC4ZNNL6TTAI - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-5FBWZB3VK6S5YQEJ" ipVersion=0x4 line=":KUBE-SEP-5FBWZB3VK6S5YQEJ - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-5FBWZB3VK6S5YQEJ - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-RK657RLKDNVNU64O" ipVersion=0x4 line=":KUBE-SVC-RK657RLKDNVNU64O - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-RK657RLKDNVNU64O - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-RWLGYI4KFGAIXSUX" ipVersion=0x4 line=":KUBE-SEP-RWLGYI4KFGAIXSUX - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-RWLGYI4KFGAIXSUX - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-7BTF33YDKSIYEVES" ipVersion=0x4 line=":KUBE-SEP-7BTF33YDKSIYEVES - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-7BTF33YDKSIYEVES - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-HSUQE2CHWJUY2C6R" ipVersion=0x4 line=":KUBE-SEP-HSUQE2CHWJUY2C6R - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-HSUQE2CHWJUY2C6R - [0:0]" table="nat" +2024-05-08 15:23:58.734 [DEBUG][216945] felix/table.go 870: Found 
forward-reference chainName="KUBE-SVC-KZRE3VWRGTXUTK6V" ipVersion=0x4 line=":KUBE-SVC-KZRE3VWRGTXUTK6V - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-KZRE3VWRGTXUTK6V - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-EKIJOT25G7M52CUX" ipVersion=0x4 line=":KUBE-SEP-EKIJOT25G7M52CUX - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-EKIJOT25G7M52CUX - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-CUFKKLOUSLP5E7MN" ipVersion=0x4 line=":KUBE-SVC-CUFKKLOUSLP5E7MN - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-CUFKKLOUSLP5E7MN - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-X2YH7Y3ZC6UMNUBF" ipVersion=0x4 line=":KUBE-SVC-X2YH7Y3ZC6UMNUBF - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-X2YH7Y3ZC6UMNUBF - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-SD2TGVUXDFKK67OH" ipVersion=0x4 line=":KUBE-SVC-SD2TGVUXDFKK67OH - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-SD2TGVUXDFKK67OH - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-CLM54V3O433WFRYU" ipVersion=0x4 line=":KUBE-SEP-CLM54V3O433WFRYU - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-CLM54V3O433WFRYU - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-J73TGZ57OY73QEMH" ipVersion=0x4 line=":KUBE-SVC-J73TGZ57OY73QEMH - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-J73TGZ57OY73QEMH - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-IIMYVCRPQLPIWIWO" ipVersion=0x4 line=":KUBE-SEP-IIMYVCRPQLPIWIWO - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-IIMYVCRPQLPIWIWO - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-AG5Y2FRMGRWC2ZXO" ipVersion=0x4 line=":KUBE-SVC-AG5Y2FRMGRWC2ZXO - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-AG5Y2FRMGRWC2ZXO - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-FQZ327T2ZAZA2XLF" ipVersion=0x4 line=":KUBE-SEP-FQZ327T2ZAZA2XLF - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-FQZ327T2ZAZA2XLF - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-RBCVOSJIBGSGMVR4" ipVersion=0x4 line=":KUBE-SEP-RBCVOSJIBGSGMVR4 - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 
line=":KUBE-SEP-RBCVOSJIBGSGMVR4 - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-JK7SFUIWMGLVMFRB" ipVersion=0x4 line=":KUBE-SVC-JK7SFUIWMGLVMFRB - [0:0]" table="nat" +2024-05-08 15:23:58.733 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-JK7SFUIWMGLVMFRB - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-IT4WWUY4CU36ZCLW" ipVersion=0x4 line=":KUBE-SVC-IT4WWUY4CU36ZCLW - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-IT4WWUY4CU36ZCLW - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-EXT-IT4WWUY4CU36ZCLW" ipVersion=0x4 line=":KUBE-EXT-IT4WWUY4CU36ZCLW - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-EXT-IT4WWUY4CU36ZCLW - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-CR4K3MAUOTBI5Q3J" ipVersion=0x4 line=":KUBE-SEP-CR4K3MAUOTBI5Q3J - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-CR4K3MAUOTBI5Q3J - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-LY3LCAWVOCKS3CWB" ipVersion=0x4 line=":KUBE-SVC-LY3LCAWVOCKS3CWB - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-LY3LCAWVOCKS3CWB - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-PHAWWJ5ZQH3LWBU5" ipVersion=0x4 line=":KUBE-SEP-PHAWWJ5ZQH3LWBU5 - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-PHAWWJ5ZQH3LWBU5 - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-U7SXUNEARU4YPLE2" ipVersion=0x4 line=":KUBE-SVC-U7SXUNEARU4YPLE2 - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-U7SXUNEARU4YPLE2 - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-SN44PXHCQMEOI7E5" ipVersion=0x4 line=":KUBE-SEP-SN44PXHCQMEOI7E5 - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-SN44PXHCQMEOI7E5 - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-ZADHL4UIKPR2OF6K" ipVersion=0x4 line=":KUBE-SEP-ZADHL4UIKPR2OF6K - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-ZADHL4UIKPR2OF6K - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-J3PVSPN3RTQS7GVE" ipVersion=0x4 line=":KUBE-SEP-J3PVSPN3RTQS7GVE - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-J3PVSPN3RTQS7GVE - [0:0]" table="nat" +2024-05-08 15:23:58.732 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-OIUKEGP77GBKF6Z7" ipVersion=0x4 line=":KUBE-SVC-OIUKEGP77GBKF6Z7 
- [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-OIUKEGP77GBKF6Z7 - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-EXT-OIUKEGP77GBKF6Z7" ipVersion=0x4 line=":KUBE-EXT-OIUKEGP77GBKF6Z7 - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-EXT-OIUKEGP77GBKF6Z7 - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-VBHPBAF72XG6J2RE" ipVersion=0x4 line=":KUBE-SEP-VBHPBAF72XG6J2RE - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-VBHPBAF72XG6J2RE - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-TZ7Y4ZHPDOMAVITB" ipVersion=0x4 line=":KUBE-SVC-TZ7Y4ZHPDOMAVITB - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-TZ7Y4ZHPDOMAVITB - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-52ZMRLUM4LOS6UPE" ipVersion=0x4 line=":KUBE-SVC-52ZMRLUM4LOS6UPE - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-52ZMRLUM4LOS6UPE - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-5W6XNHA4VTWXVW5N" ipVersion=0x4 line=":KUBE-SEP-5W6XNHA4VTWXVW5N - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-5W6XNHA4VTWXVW5N - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-3MODYIURLI6GV2LQ" ipVersion=0x4 line=":KUBE-SEP-3MODYIURLI6GV2LQ - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-3MODYIURLI6GV2LQ - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-Z3Q7YNHRMQOR6ALG" ipVersion=0x4 line=":KUBE-SVC-Z3Q7YNHRMQOR6ALG - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-Z3Q7YNHRMQOR6ALG - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-D5UPVGTYKPIILLQM" ipVersion=0x4 line=":KUBE-SEP-D5UPVGTYKPIILLQM - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-D5UPVGTYKPIILLQM - [0:0]" table="nat" +2024-05-08 15:23:58.731 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-ANZN2MLUJ2E4HNGN" ipVersion=0x4 line=":KUBE-SEP-ANZN2MLUJ2E4HNGN - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-ANZN2MLUJ2E4HNGN - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-7UYEPBQA6QDH7RGJ" ipVersion=0x4 line=":KUBE-SVC-7UYEPBQA6QDH7RGJ - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-7UYEPBQA6QDH7RGJ - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 870: 
Found forward-reference chainName="KUBE-SEP-BQBCPDQ273PDK62R" ipVersion=0x4 line=":KUBE-SEP-BQBCPDQ273PDK62R - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-BQBCPDQ273PDK62R - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-WESIIMN4SLELJTF3" ipVersion=0x4 line=":KUBE-SEP-WESIIMN4SLELJTF3 - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-WESIIMN4SLELJTF3 - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-W3WIBS4GJN6KZ2WW" ipVersion=0x4 line=":KUBE-SEP-W3WIBS4GJN6KZ2WW - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-W3WIBS4GJN6KZ2WW - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-KBEKUWMHE4C6SZ7N" ipVersion=0x4 line=":KUBE-SVC-KBEKUWMHE4C6SZ7N - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-KBEKUWMHE4C6SZ7N - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-ZEAOESFGQJP3UNGY" ipVersion=0x4 line=":KUBE-SVC-ZEAOESFGQJP3UNGY - [0:0]" table="nat" +2024-05-08 15:23:58.730 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-ZEAOESFGQJP3UNGY - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-I3FNB3EP6IKUZIYZ" ipVersion=0x4 line=":KUBE-SEP-I3FNB3EP6IKUZIYZ - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-I3FNB3EP6IKUZIYZ - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-XYB3XTKJEHCT7QLJ" ipVersion=0x4 line=":KUBE-SEP-XYB3XTKJEHCT7QLJ - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-XYB3XTKJEHCT7QLJ - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-I4PVH4LZS6WVJ7IK" ipVersion=0x4 line=":KUBE-SEP-I4PVH4LZS6WVJ7IK - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-I4PVH4LZS6WVJ7IK - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-36YG6YT5FYZEAFWC" ipVersion=0x4 line=":KUBE-SEP-36YG6YT5FYZEAFWC - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-36YG6YT5FYZEAFWC - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-SJCXZ2U3QOARHFCT" ipVersion=0x4 line=":KUBE-SEP-SJCXZ2U3QOARHFCT - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-SJCXZ2U3QOARHFCT - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-VW4NXVTDZKAUPNOF" ipVersion=0x4 line=":KUBE-SEP-VW4NXVTDZKAUPNOF - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 
line=":KUBE-SEP-VW4NXVTDZKAUPNOF - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-PCL3G4EEXCQ2FSVM" ipVersion=0x4 line=":KUBE-SEP-PCL3G4EEXCQ2FSVM - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-PCL3G4EEXCQ2FSVM - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-6IXJFW44C4LHV2LJ" ipVersion=0x4 line=":KUBE-SEP-6IXJFW44C4LHV2LJ - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-6IXJFW44C4LHV2LJ - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-DSNEGDD6UCC4LZYX" ipVersion=0x4 line=":KUBE-SEP-DSNEGDD6UCC4LZYX - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-DSNEGDD6UCC4LZYX - [0:0]" table="nat" +2024-05-08 15:23:58.729 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-YAXE2XEB7GDZIGFZ" ipVersion=0x4 line=":KUBE-SVC-YAXE2XEB7GDZIGFZ - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-YAXE2XEB7GDZIGFZ - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-P5ZB5AIOCRJ4RV4Y" ipVersion=0x4 line=":KUBE-SEP-P5ZB5AIOCRJ4RV4Y - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-P5ZB5AIOCRJ4RV4Y - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-IQOJZFBO3XPPCRJB" ipVersion=0x4 line=":KUBE-SEP-IQOJZFBO3XPPCRJB - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-IQOJZFBO3XPPCRJB - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-WN3P4DNLGLC3N47B" ipVersion=0x4 line=":KUBE-SVC-WN3P4DNLGLC3N47B - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-WN3P4DNLGLC3N47B - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-SSDYILC3J6ZPOTZY" ipVersion=0x4 line=":KUBE-SEP-SSDYILC3J6ZPOTZY - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-SSDYILC3J6ZPOTZY - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-4FPN6PFSRIMNDQ7J" ipVersion=0x4 line=":KUBE-SVC-4FPN6PFSRIMNDQ7J - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-4FPN6PFSRIMNDQ7J - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-RBJEJ2L6RDASGURQ" ipVersion=0x4 line=":KUBE-SEP-RBJEJ2L6RDASGURQ - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-RBJEJ2L6RDASGURQ - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-LABIPR5ZW4D7G3U6" ipVersion=0x4 line=":KUBE-SVC-LABIPR5ZW4D7G3U6 
- [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-LABIPR5ZW4D7G3U6 - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-ZEVKU6VDBL3VFBC7" ipVersion=0x4 line=":KUBE-SVC-ZEVKU6VDBL3VFBC7 - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-ZEVKU6VDBL3VFBC7 - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-OOX6R46TNF7QL54W" ipVersion=0x4 line=":KUBE-SEP-OOX6R46TNF7QL54W - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-OOX6R46TNF7QL54W - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-MVG7ISBBNKSJ7QVA" ipVersion=0x4 line=":KUBE-SEP-MVG7ISBBNKSJ7QVA - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-MVG7ISBBNKSJ7QVA - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-TQIUDEPA4OJU54DK" ipVersion=0x4 line=":KUBE-SVC-TQIUDEPA4OJU54DK - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-TQIUDEPA4OJU54DK - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-GPWHVMSJP7EBWEDE" ipVersion=0x4 line=":KUBE-SEP-GPWHVMSJP7EBWEDE - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-GPWHVMSJP7EBWEDE - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-HNGBSZBMZD34I3OQ" ipVersion=0x4 line=":KUBE-SEP-HNGBSZBMZD34I3OQ - [0:0]" table="nat" +2024-05-08 15:23:58.728 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-HNGBSZBMZD34I3OQ - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-HZJ2P4B3D6V35OK4" ipVersion=0x4 line=":KUBE-SVC-HZJ2P4B3D6V35OK4 - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-HZJ2P4B3D6V35OK4 - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-LSDFNFI4HUJAEADV" ipVersion=0x4 line=":KUBE-SEP-LSDFNFI4HUJAEADV - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-LSDFNFI4HUJAEADV - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-CFL7VWM7QIC62UTX" ipVersion=0x4 line=":KUBE-SVC-CFL7VWM7QIC62UTX - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-CFL7VWM7QIC62UTX - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-R3IN3KSCCGHIDHGX" ipVersion=0x4 line=":KUBE-SVC-R3IN3KSCCGHIDHGX - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-R3IN3KSCCGHIDHGX - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: 
Found forward-reference chainName="KUBE-SEP-VQKZ4FN322AVJFOA" ipVersion=0x4 line=":KUBE-SEP-VQKZ4FN322AVJFOA - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-VQKZ4FN322AVJFOA - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-7LSPN7XW4KB3RWF2" ipVersion=0x4 line=":KUBE-SVC-7LSPN7XW4KB3RWF2 - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-7LSPN7XW4KB3RWF2 - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-UKUAQGQTVKWUAAWC" ipVersion=0x4 line=":KUBE-SEP-UKUAQGQTVKWUAAWC - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-UKUAQGQTVKWUAAWC - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-NYTFSMYLRFDMCAJE" ipVersion=0x4 line=":KUBE-SVC-NYTFSMYLRFDMCAJE - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-NYTFSMYLRFDMCAJE - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-YJZRYUZS3AHJBPJV" ipVersion=0x4 line=":KUBE-SEP-YJZRYUZS3AHJBPJV - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-YJZRYUZS3AHJBPJV - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-X2747OKRD2IICBKX" ipVersion=0x4 line=":KUBE-SEP-X2747OKRD2IICBKX - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-X2747OKRD2IICBKX - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-6VGOIRXZOHMREOOH" ipVersion=0x4 line=":KUBE-SVC-6VGOIRXZOHMREOOH - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-6VGOIRXZOHMREOOH - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-Z6242HF2AGRTPORB" ipVersion=0x4 line=":KUBE-SEP-Z6242HF2AGRTPORB - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-Z6242HF2AGRTPORB - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-VXPZDRYSSF3ZHXC5" ipVersion=0x4 line=":KUBE-SEP-VXPZDRYSSF3ZHXC5 - [0:0]" table="nat" +2024-05-08 15:23:58.727 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-VXPZDRYSSF3ZHXC5 - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-DNKKUBNIW74GFFD2" ipVersion=0x4 line=":KUBE-SEP-DNKKUBNIW74GFFD2 - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-DNKKUBNIW74GFFD2 - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-H6ZGMLWV3EYASBGL" ipVersion=0x4 line=":KUBE-SVC-H6ZGMLWV3EYASBGL - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 
line=":KUBE-SVC-H6ZGMLWV3EYASBGL - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-FJOSKFOGQCYGNTIQ" ipVersion=0x4 line=":KUBE-SVC-FJOSKFOGQCYGNTIQ - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-FJOSKFOGQCYGNTIQ - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-STN2AYVEMB5KSJTI" ipVersion=0x4 line=":KUBE-SEP-STN2AYVEMB5KSJTI - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-STN2AYVEMB5KSJTI - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-J4JJHTDQMUN5VWB6" ipVersion=0x4 line=":KUBE-SVC-J4JJHTDQMUN5VWB6 - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-J4JJHTDQMUN5VWB6 - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-TX7BPJUFZ7YPXWZF" ipVersion=0x4 line=":KUBE-SEP-TX7BPJUFZ7YPXWZF - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-TX7BPJUFZ7YPXWZF - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-IWFLGN3R7SGCOZ56" ipVersion=0x4 line=":KUBE-SVC-IWFLGN3R7SGCOZ56 - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-IWFLGN3R7SGCOZ56 - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-4QXBCUECYDBKXTTB" ipVersion=0x4 line=":KUBE-SVC-4QXBCUECYDBKXTTB - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-4QXBCUECYDBKXTTB - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-EQOPB57DNDS5A2WM" ipVersion=0x4 line=":KUBE-SVC-EQOPB57DNDS5A2WM - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-EQOPB57DNDS5A2WM - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-UK5T3NL2KBE2TYM6" ipVersion=0x4 line=":KUBE-SVC-UK5T3NL2KBE2TYM6 - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-UK5T3NL2KBE2TYM6 - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-UUQ24OZJ7IE6W7TE" ipVersion=0x4 line=":KUBE-SEP-UUQ24OZJ7IE6W7TE - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-UUQ24OZJ7IE6W7TE - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-RXSN6L3XTY5VDSO6" ipVersion=0x4 line=":KUBE-SVC-RXSN6L3XTY5VDSO6 - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-RXSN6L3XTY5VDSO6 - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-MLDVOA6YM2FDIXAQ" ipVersion=0x4 line=":KUBE-SVC-MLDVOA6YM2FDIXAQ 
- [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-MLDVOA6YM2FDIXAQ - [0:0]" table="nat" +2024-05-08 15:23:58.726 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-D5AOJO6MVLA5GRWQ" ipVersion=0x4 line=":KUBE-SVC-D5AOJO6MVLA5GRWQ - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-D5AOJO6MVLA5GRWQ - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-JCC4XVNOKDNWM5ZE" ipVersion=0x4 line=":KUBE-SVC-JCC4XVNOKDNWM5ZE - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-JCC4XVNOKDNWM5ZE - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-MSNFK5YFI7PFEEBO" ipVersion=0x4 line=":KUBE-SEP-MSNFK5YFI7PFEEBO - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-MSNFK5YFI7PFEEBO - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-NPQ7IHFD2XNITE4T" ipVersion=0x4 line=":KUBE-SVC-NPQ7IHFD2XNITE4T - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-NPQ7IHFD2XNITE4T - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-LEVFTNFCKN4YMK7L" ipVersion=0x4 line=":KUBE-SEP-LEVFTNFCKN4YMK7L - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-LEVFTNFCKN4YMK7L - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-GMRLYDDTWIJUGIF3" ipVersion=0x4 line=":KUBE-SVC-GMRLYDDTWIJUGIF3 - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-GMRLYDDTWIJUGIF3 - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-M5TG7NRZBWCTXJZX" ipVersion=0x4 line=":KUBE-SVC-M5TG7NRZBWCTXJZX - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-M5TG7NRZBWCTXJZX - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-Y4JWRWFATR2ZINOV" ipVersion=0x4 line=":KUBE-SEP-Y4JWRWFATR2ZINOV - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-Y4JWRWFATR2ZINOV - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-DZV2Y2VCXTY3BNR3" ipVersion=0x4 line=":KUBE-SVC-DZV2Y2VCXTY3BNR3 - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-DZV2Y2VCXTY3BNR3 - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-GSKN43HHZKBGUPHY" ipVersion=0x4 line=":KUBE-SEP-GSKN43HHZKBGUPHY - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-GSKN43HHZKBGUPHY - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: 
Found forward-reference chainName="KUBE-SVC-2T7PD3HI2G3QMEVD" ipVersion=0x4 line=":KUBE-SVC-2T7PD3HI2G3QMEVD - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-2T7PD3HI2G3QMEVD - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-AB4YQQQ7D66YOVGJ" ipVersion=0x4 line=":KUBE-SVC-AB4YQQQ7D66YOVGJ - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-AB4YQQQ7D66YOVGJ - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-A52BZFPZA5XO3M2T" ipVersion=0x4 line=":KUBE-SEP-A52BZFPZA5XO3M2T - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-A52BZFPZA5XO3M2T - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-QFJG3IBMPHHC2EHE" ipVersion=0x4 line=":KUBE-SVC-QFJG3IBMPHHC2EHE - [0:0]" table="nat" +2024-05-08 15:23:58.725 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-QFJG3IBMPHHC2EHE - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-G6JY5FDR2GVQJJM6" ipVersion=0x4 line=":KUBE-SEP-G6JY5FDR2GVQJJM6 - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-G6JY5FDR2GVQJJM6 - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-HB65J6KEISIHXFWC" ipVersion=0x4 line=":KUBE-SVC-HB65J6KEISIHXFWC - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-HB65J6KEISIHXFWC - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-7UDPYM5MZU43KT5B" ipVersion=0x4 line=":KUBE-SVC-7UDPYM5MZU43KT5B - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-7UDPYM5MZU43KT5B - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-SAZCE75LUBCDFPPO" ipVersion=0x4 line=":KUBE-SEP-SAZCE75LUBCDFPPO - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-SAZCE75LUBCDFPPO - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-E3IBCFULSWKQCT47" ipVersion=0x4 line=":KUBE-SVC-E3IBCFULSWKQCT47 - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-E3IBCFULSWKQCT47 - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-MLORXFJ4ICWCAORT" ipVersion=0x4 line=":KUBE-SVC-MLORXFJ4ICWCAORT - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-MLORXFJ4ICWCAORT - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-Z6KIE5WLISQE5HKY" ipVersion=0x4 line=":KUBE-SEP-Z6KIE5WLISQE5HKY - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 
line=":KUBE-SEP-Z6KIE5WLISQE5HKY - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-RVX32V65TT6LIZYH" ipVersion=0x4 line=":KUBE-SEP-RVX32V65TT6LIZYH - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-RVX32V65TT6LIZYH - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-TWLCBUAADUUJJKL2" ipVersion=0x4 line=":KUBE-SEP-TWLCBUAADUUJJKL2 - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-TWLCBUAADUUJJKL2 - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-H6A4VVO3RG7WMM7O" ipVersion=0x4 line=":KUBE-SVC-H6A4VVO3RG7WMM7O - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-H6A4VVO3RG7WMM7O - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-BTQI4NDAAMNKIZK3" ipVersion=0x4 line=":KUBE-SEP-BTQI4NDAAMNKIZK3 - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-BTQI4NDAAMNKIZK3 - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-CJUI32YM3CI6R4R4" ipVersion=0x4 line=":KUBE-SEP-CJUI32YM3CI6R4R4 - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-CJUI32YM3CI6R4R4 - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-FN6WL5XLZP6OCM5B" ipVersion=0x4 line=":KUBE-SEP-FN6WL5XLZP6OCM5B - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-FN6WL5XLZP6OCM5B - [0:0]" table="nat" +2024-05-08 15:23:58.724 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-3EIJ5DADIMTS46Q5" ipVersion=0x4 line=":KUBE-SVC-3EIJ5DADIMTS46Q5 - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-3EIJ5DADIMTS46Q5 - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-W3GFXNJIKZEPTVQR" ipVersion=0x4 line=":KUBE-SEP-W3GFXNJIKZEPTVQR - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-W3GFXNJIKZEPTVQR - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-6FAJOJUR6MOTNYYM" ipVersion=0x4 line=":KUBE-SVC-6FAJOJUR6MOTNYYM - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-6FAJOJUR6MOTNYYM - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-5V5ZVH4US2Q3MG4P" ipVersion=0x4 line=":KUBE-SVC-5V5ZVH4US2Q3MG4P - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-5V5ZVH4US2Q3MG4P - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-WSWEALVZ5UODB5AO" ipVersion=0x4 line=":KUBE-SVC-WSWEALVZ5UODB5AO 
- [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-WSWEALVZ5UODB5AO - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-JIFT53ZDHN2GW65R" ipVersion=0x4 line=":KUBE-SVC-JIFT53ZDHN2GW65R - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-JIFT53ZDHN2GW65R - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-DLWMNPJW4DIPABJZ" ipVersion=0x4 line=":KUBE-SEP-DLWMNPJW4DIPABJZ - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-DLWMNPJW4DIPABJZ - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-BSIMUJMFFAG4MDGG" ipVersion=0x4 line=":KUBE-SEP-BSIMUJMFFAG4MDGG - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-BSIMUJMFFAG4MDGG - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-7CKOJQ4UC6YLX7N5" ipVersion=0x4 line=":KUBE-SVC-7CKOJQ4UC6YLX7N5 - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-7CKOJQ4UC6YLX7N5 - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-XOZBKV6UZ2OQO7PJ" ipVersion=0x4 line=":KUBE-SEP-XOZBKV6UZ2OQO7PJ - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-XOZBKV6UZ2OQO7PJ - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-MBEGBE5QHTTJIKT4" ipVersion=0x4 line=":KUBE-SEP-MBEGBE5QHTTJIKT4 - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-MBEGBE5QHTTJIKT4 - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-HJEFVUJBUWGIJQSA" ipVersion=0x4 line=":KUBE-SEP-HJEFVUJBUWGIJQSA - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-HJEFVUJBUWGIJQSA - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-4SABUIWGDEUAVAXU" ipVersion=0x4 line=":KUBE-SVC-4SABUIWGDEUAVAXU - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-4SABUIWGDEUAVAXU - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-H2UPXT7LEQB2RKPM" ipVersion=0x4 line=":KUBE-SVC-H2UPXT7LEQB2RKPM - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-H2UPXT7LEQB2RKPM - [0:0]" table="nat" +2024-05-08 15:23:58.723 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-O2CDV4TQK4NFOMFY" ipVersion=0x4 line=":KUBE-SEP-O2CDV4TQK4NFOMFY - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-O2CDV4TQK4NFOMFY - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: 
Found forward-reference chainName="KUBE-SEP-7CUWUJKQKKK7M2GJ" ipVersion=0x4 line=":KUBE-SEP-7CUWUJKQKKK7M2GJ - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-7CUWUJKQKKK7M2GJ - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-Z25RWGBWNWJQRRNQ" ipVersion=0x4 line=":KUBE-SVC-Z25RWGBWNWJQRRNQ - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-Z25RWGBWNWJQRRNQ - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-LUJJD6CXAHQ5L4FH" ipVersion=0x4 line=":KUBE-SEP-LUJJD6CXAHQ5L4FH - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-LUJJD6CXAHQ5L4FH - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-7AY3X2DSMX6XEFDA" ipVersion=0x4 line=":KUBE-SEP-7AY3X2DSMX6XEFDA - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-7AY3X2DSMX6XEFDA - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-XTOSVNUQQTRX32PR" ipVersion=0x4 line=":KUBE-SEP-XTOSVNUQQTRX32PR - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-XTOSVNUQQTRX32PR - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-7QCTR7DWHM6G7VF7" ipVersion=0x4 line=":KUBE-SVC-7QCTR7DWHM6G7VF7 - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-7QCTR7DWHM6G7VF7 - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-WPD7SA4NZWWT7PMF" ipVersion=0x4 line=":KUBE-SEP-WPD7SA4NZWWT7PMF - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-WPD7SA4NZWWT7PMF - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-UGGAZCZT5SFNENHS" ipVersion=0x4 line=":KUBE-SEP-UGGAZCZT5SFNENHS - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-UGGAZCZT5SFNENHS - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-QN25BLRJ5S3G4NCI" ipVersion=0x4 line=":KUBE-SVC-QN25BLRJ5S3G4NCI - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-QN25BLRJ5S3G4NCI - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-7FALRYRKYDRRAIGA" ipVersion=0x4 line=":KUBE-SVC-7FALRYRKYDRRAIGA - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-7FALRYRKYDRRAIGA - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-7LSFJ3QL2U63CDUM" ipVersion=0x4 line=":KUBE-SVC-7LSFJ3QL2U63CDUM - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 
line=":KUBE-SVC-7LSFJ3QL2U63CDUM - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-ZRX2ACCU77T4BJXJ" ipVersion=0x4 line=":KUBE-SEP-ZRX2ACCU77T4BJXJ - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-ZRX2ACCU77T4BJXJ - [0:0]" table="nat" +2024-05-08 15:23:58.722 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-TNA6UTJG6HRK3TJE" ipVersion=0x4 line=":KUBE-SVC-TNA6UTJG6HRK3TJE - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-TNA6UTJG6HRK3TJE - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-MHNPLH675SYFZERF" ipVersion=0x4 line=":KUBE-SVC-MHNPLH675SYFZERF - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-MHNPLH675SYFZERF - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-UIETT2GCAFJ3GRO3" ipVersion=0x4 line=":KUBE-SEP-UIETT2GCAFJ3GRO3 - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-UIETT2GCAFJ3GRO3 - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-UVIGNMFWQQVDUAR5" ipVersion=0x4 line=":KUBE-SVC-UVIGNMFWQQVDUAR5 - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-UVIGNMFWQQVDUAR5 - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-PRB6TQECAMN7BJNA" ipVersion=0x4 line=":KUBE-SEP-PRB6TQECAMN7BJNA - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-PRB6TQECAMN7BJNA - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-PID7SYGW7CWWIKNX" ipVersion=0x4 line=":KUBE-SVC-PID7SYGW7CWWIKNX - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-PID7SYGW7CWWIKNX - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-5QWI2CMG2XSBJJA7" ipVersion=0x4 line=":KUBE-SVC-5QWI2CMG2XSBJJA7 - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-5QWI2CMG2XSBJJA7 - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-YS26SZV5FM6T6HQL" ipVersion=0x4 line=":KUBE-SVC-YS26SZV5FM6T6HQL - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-YS26SZV5FM6T6HQL - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-YTR3WKSQIQUTUZ3F" ipVersion=0x4 line=":KUBE-SEP-YTR3WKSQIQUTUZ3F - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-YTR3WKSQIQUTUZ3F - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-OMMYUMPEQHZGCYZL" ipVersion=0x4 line=":KUBE-SEP-OMMYUMPEQHZGCYZL 
- [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-OMMYUMPEQHZGCYZL - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-VVMG3Z4JVCMM4KZD" ipVersion=0x4 line=":KUBE-SVC-VVMG3Z4JVCMM4KZD - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-VVMG3Z4JVCMM4KZD - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-IVPEJA3BAHCVN55U" ipVersion=0x4 line=":KUBE-SVC-IVPEJA3BAHCVN55U - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-IVPEJA3BAHCVN55U - [0:0]" table="nat" +2024-05-08 15:23:58.721 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-AHEDJUJQOZT3IVYC" ipVersion=0x4 line=":KUBE-SEP-AHEDJUJQOZT3IVYC - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-AHEDJUJQOZT3IVYC - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-BTHL5OVQL4PX5YHA" ipVersion=0x4 line=":KUBE-SEP-BTHL5OVQL4PX5YHA - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-BTHL5OVQL4PX5YHA - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-NVNLZVDQSGQUD3NM" ipVersion=0x4 line=":KUBE-SVC-NVNLZVDQSGQUD3NM - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-NVNLZVDQSGQUD3NM - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-EXWBKNZYO3LINDJA" ipVersion=0x4 line=":KUBE-SVC-EXWBKNZYO3LINDJA - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-EXWBKNZYO3LINDJA - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-4ZIPLMOTRXOM54AS" ipVersion=0x4 line=":KUBE-SVC-4ZIPLMOTRXOM54AS - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-4ZIPLMOTRXOM54AS - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-EXT-4ZIPLMOTRXOM54AS" ipVersion=0x4 line=":KUBE-EXT-4ZIPLMOTRXOM54AS - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-EXT-4ZIPLMOTRXOM54AS - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-TM7LBONKHOGQDYIF" ipVersion=0x4 line=":KUBE-SVC-TM7LBONKHOGQDYIF - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-TM7LBONKHOGQDYIF - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-3DOZWTG3HFPNWBDT" ipVersion=0x4 line=":KUBE-SEP-3DOZWTG3HFPNWBDT - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-3DOZWTG3HFPNWBDT - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: 
Found forward-reference chainName="KUBE-SVC-WLRMOJ7S2HJOGBZJ" ipVersion=0x4 line=":KUBE-SVC-WLRMOJ7S2HJOGBZJ - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-WLRMOJ7S2HJOGBZJ - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-SQUZG2VKJW4F7R4U" ipVersion=0x4 line=":KUBE-SEP-SQUZG2VKJW4F7R4U - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-SQUZG2VKJW4F7R4U - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-JLWNJUHTK454THAD" ipVersion=0x4 line=":KUBE-SVC-JLWNJUHTK454THAD - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-JLWNJUHTK454THAD - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-QETW7GX5M6GGVEPU" ipVersion=0x4 line=":KUBE-SEP-QETW7GX5M6GGVEPU - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-QETW7GX5M6GGVEPU - [0:0]" table="nat" +2024-05-08 15:23:58.720 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-UFPOPGLZU7FN5CBH" ipVersion=0x4 line=":KUBE-SEP-UFPOPGLZU7FN5CBH - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-UFPOPGLZU7FN5CBH - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][501368] felix/health.go 167: Health: live +2024-05-08 15:23:58.719 [DEBUG][501368] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-PPW7ULP2MAT5XXOD" ipVersion=0x4 line=":KUBE-SVC-PPW7ULP2MAT5XXOD - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-PPW7ULP2MAT5XXOD - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-EXT-PPW7ULP2MAT5XXOD" ipVersion=0x4 line=":KUBE-EXT-PPW7ULP2MAT5XXOD - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-EXT-PPW7ULP2MAT5XXOD - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-WA4I6J6W56UBBTFO" ipVersion=0x4 line=":KUBE-SVC-WA4I6J6W56UBBTFO - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-WA4I6J6W56UBBTFO - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-732ROEGMHTE2S4TK" ipVersion=0x4 line=":KUBE-SVC-732ROEGMHTE2S4TK - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 
851: Parsing line ipVersion=0x4 line=":KUBE-SVC-732ROEGMHTE2S4TK - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-6JHNIEYHCAY27ZCP" ipVersion=0x4 line=":KUBE-SVC-6JHNIEYHCAY27ZCP - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-6JHNIEYHCAY27ZCP - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-WMTB2MFIIPD5SKGJ" ipVersion=0x4 line=":KUBE-SVC-WMTB2MFIIPD5SKGJ - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-WMTB2MFIIPD5SKGJ - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-R2O2LOXNQINSBTGK" ipVersion=0x4 line=":KUBE-SEP-R2O2LOXNQINSBTGK - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-R2O2LOXNQINSBTGK - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-I44HARIOI62RM5KK" ipVersion=0x4 line=":KUBE-SEP-I44HARIOI62RM5KK - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-I44HARIOI62RM5KK - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-IGQCLB6N4BMNAM4Y" ipVersion=0x4 line=":KUBE-SVC-IGQCLB6N4BMNAM4Y - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-IGQCLB6N4BMNAM4Y - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][501368] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc18704222500c752, ext:538680442362038, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:58.719 [DEBUG][501368] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc187042382481a48, ext:538685859833872, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:58.719 [DEBUG][501368] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc184f614140585e2, ext:157460394, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:58.718 [DEBUG][501368] felix/health.go 157: GET /liveness +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SEP-CDJ3WS4667IFKPEL" ipVersion=0x4 line=":KUBE-SEP-CDJ3WS4667IFKPEL - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SEP-CDJ3WS4667IFKPEL - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-GTCS7X5V7EWNSBJG" ipVersion=0x4 line=":KUBE-SVC-GTCS7X5V7EWNSBJG - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] 
felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-GTCS7X5V7EWNSBJG - [0:0]" table="nat" +2024-05-08 15:23:58.719 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-4MRB3NPYMNEAUGQV" ipVersion=0x4 line=":KUBE-SVC-4MRB3NPYMNEAUGQV - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-4MRB3NPYMNEAUGQV - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-T6DLVELAYXXB2F54" ipVersion=0x4 line=":KUBE-SVC-T6DLVELAYXXB2F54 - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-T6DLVELAYXXB2F54 - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-AR36PQT25X2JZRRQ" ipVersion=0x4 line=":KUBE-SVC-AR36PQT25X2JZRRQ - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-AR36PQT25X2JZRRQ - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SVC-KBYEQRN5PEWXFGHF" ipVersion=0x4 line=":KUBE-SVC-KBYEQRN5PEWXFGHF - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SVC-KBYEQRN5PEWXFGHF - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-MARK-MASQ" ipVersion=0x4 line=":KUBE-MARK-MASQ - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-MARK-MASQ - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-NODEPORTS" ipVersion=0x4 line=":KUBE-NODEPORTS - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-NODEPORTS - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-POSTROUTING" ipVersion=0x4 line=":KUBE-POSTROUTING - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-POSTROUTING - [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="PREROUTING" ipVersion=0x4 line=":PREROUTING ACCEPT [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":PREROUTING ACCEPT [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="OUTPUT" ipVersion=0x4 line=":OUTPUT ACCEPT [0:0]" table="nat" +2024-05-08 15:23:58.718 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":OUTPUT ACCEPT [0:0]" table="nat" +2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-SERVICES" ipVersion=0x4 line=":KUBE-SERVICES - [0:0]" table="nat" +2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-SERVICES - [0:0]" table="nat" +2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-PROXY-CANARY" ipVersion=0x4 line=":KUBE-PROXY-CANARY - [0:0]" table="nat" +2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-PROXY-CANARY - [0:0]" table="nat" +2024-05-08 15:23:58.717 
[DEBUG][216945] felix/table.go 870: Found forward-reference chainName="POSTROUTING" ipVersion=0x4 line=":POSTROUTING ACCEPT [0:0]" table="nat" +2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":POSTROUTING ACCEPT [0:0]" table="nat" +2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="IP-MASQ-AGENT" ipVersion=0x4 line=":IP-MASQ-AGENT - [0:0]" table="nat" +2024-05-08 15:23:58.717 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":IP-MASQ-AGENT - [0:0]" table="nat" +2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 870: Found forward-reference chainName="KUBE-KUBELET-CANARY" ipVersion=0x4 line=":KUBE-KUBELET-CANARY - [0:0]" table="nat" +2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line=":KUBE-KUBELET-CANARY - [0:0]" table="nat" +2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881: Not an append, skipping ipVersion=0x4 line="*nat" table="nat" +2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="*nat" table="nat" +2024-05-08 15:23:58.716 [DEBUG][216945] felix/table.go 881: Not an append, skipping ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat" +2024-05-08 15:23:58.715 [DEBUG][216945] felix/table.go 851: Parsing line ipVersion=0x4 line="# Generated by iptables-nft-save v1.8.4 on Wed May 8 15:23:58 2024" table="nat" +2024-05-08 15:23:58.684 [DEBUG][216945] felix/table.go 604: Loading current iptables state and checking it is correct. ipVersion=0x4 table="nat" +2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 118: Parsed kernel version version=5.15.0-1057 +2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 110: Raw kernel version rawVersion="Linux version 5.15.0-1057-azure (buildd@lcy02-amd64-033) (gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0, GNU ld (GNU Binutils for Ubuntu) 2.38) #65-Ubuntu SMP Fri Feb 9 18:39:24 UTC 2024\n" +2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 118: Parsed kernel version version=5.15.0-1057 +2024-05-08 15:23:58.684 [DEBUG][216945] felix/versionparse.go 110: Raw kernel version rawVersion="Linux version 5.15.0-1057-azure (buildd@lcy02-amd64-033) (gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0, GNU ld (GNU Binutils for Ubuntu) 2.38) #65-Ubuntu SMP Fri Feb 9 18:39:24 UTC 2024\n" +2024-05-08 15:23:58.684 [DEBUG][216945] felix/feature_detect.go 255: Parsed iptables version version=1.8.4 +2024-05-08 15:23:58.684 [DEBUG][216945] felix/feature_detect.go 242: Ran iptables --version rawVersion="iptables v1.8.4 (legacy)\n" +2024-05-08 15:23:58.681 [DEBUG][216945] felix/feature_detect.go 112: Refreshing detected iptables features +2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="filter" +2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="mangle" +2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="filter" +2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 944: Invalidating dataplane cache ipVersion=0x4 reason="refresh timer" table="nat" +2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. 
ipVersion=0x4 table="mangle" +2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="raw" +2024-05-08 15:23:58.681 [DEBUG][216945] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="raw" +2024-05-08 15:23:58.681 [DEBUG][216945] felix/ipsets.go 643: No dirty IP sets. family="inet" +2024-05-08 15:23:58.681 [DEBUG][216945] felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4 +2024-05-08 15:23:58.681 [DEBUG][216945] felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4 +2024-05-08 15:23:58.681 [DEBUG][216945] felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{} +2024-05-08 15:23:58.681 [DEBUG][216945] felix/xdp_state.go 968: Processing member updates. family=4 +2024-05-08 15:23:58.681 [DEBUG][216945] felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4" +2024-05-08 15:23:58.681 [DEBUG][216945] felix/xdp_state.go 1798: Processing BPF actions. family="ipv4" +2024-05-08 15:23:58.681 [DEBUG][216945] felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} +2024-05-08 15:23:58.681 [DEBUG][216945] felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4 +2024-05-08 15:23:58.680 [DEBUG][216945] felix/endpoint_mgr.go 443: Reporting endpoint status. dirtyEndpoints=set.Set{} +2024-05-08 15:23:58.680 [DEBUG][216945] felix/int_dataplane.go 1807: Applying dataplane updates +2024-05-08 15:23:58.680 [DEBUG][216945] felix/int_dataplane.go 1785: Reschedule kick received +2024-05-08 15:23:58.605 [INFO][65] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 10.5s: avg=6ms longest=6ms (resync-ipsets-v4) +2024-05-08 15:23:58.605 [DEBUG][65] felix/int_dataplane.go 2080: Asked to reschedule. delay=52.18051384s +2024-05-08 15:23:58.605 [DEBUG][65] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="filter" +2024-05-08 15:23:58.605 [DEBUG][65] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="filter" +2024-05-08 15:23:58.604 [DEBUG][65] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="nat" +2024-05-08 15:23:58.604 [DEBUG][65] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="nat" +2024-05-08 15:23:58.604 [DEBUG][65] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="mangle" +2024-05-08 15:23:58.604 [DEBUG][65] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. 
ipVersion=0x4 table="mangle" +2024-05-08 15:23:58.604 [DEBUG][65] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="raw" +2024-05-08 15:23:58.604 [DEBUG][65] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="raw" +2024-05-08 15:23:58.604 [DEBUG][65] felix/ipsets.go 643: No dirty IP sets. family="inet" +2024-05-08 15:23:58.604 [DEBUG][65] felix/ipsets.go 366: Finished IPSets resync family="inet" numInconsistenciesFound=0 resyncDuration=3.864976ms +2024-05-08 15:23:58.604 [DEBUG][65] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40this-host" +2024-05-08 15:23:58.604 [DEBUG][65] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40masq-ipam-pools" +2024-05-08 15:23:58.604 [DEBUG][65] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40all-ipam-pools" +2024-05-08 15:23:58.604 [DEBUG][65] felix/ipsets.go 589: Whitelisting IP sets. ID="all-ipam-pools" family="inet" mainName="cali40all-ipam-pools" +2024-05-08 15:23:58.604 [DEBUG][65] felix/ipsets.go 589: Whitelisting IP sets. ID="this-host" family="inet" mainName="cali40this-host" +2024-05-08 15:23:58.603 [DEBUG][65] felix/ipsets.go 589: Whitelisting IP sets. ID="masq-ipam-pools" family="inet" mainName="cali40masq-ipam-pools" +2024-05-08 15:23:58.603 [DEBUG][65] felix/ipsets.go 467: Found member in dataplane canon=127.0.0.1 family="inet" member="127.0.0.1" setID="this-host" +2024-05-08 15:23:58.603 [DEBUG][65] felix/ipsets.go 467: Found member in dataplane canon=127.0.0.0 family="inet" member="127.0.0.0" setID="this-host" +2024-05-08 15:23:58.603 [DEBUG][65] felix/ipsets.go 467: Found member in dataplane canon=10.68.14.192 family="inet" member="10.68.14.192" setID="this-host" +2024-05-08 15:23:58.602 [DEBUG][65] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40this-host" +2024-05-08 15:23:58.602 [DEBUG][65] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40masq-ipam-pools" +2024-05-08 15:23:58.602 [DEBUG][65] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40all-ipam-pools" +2024-05-08 15:23:58.600 [DEBUG][65] felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4 +2024-05-08 15:23:58.600 [DEBUG][65] felix/ipsets.go 314: Resyncing ipsets with dataplane. family="inet" +2024-05-08 15:23:58.600 [DEBUG][65] felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet" +2024-05-08 15:23:58.600 [DEBUG][65] felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4 +2024-05-08 15:23:58.600 [DEBUG][65] felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{} +2024-05-08 15:23:58.600 [DEBUG][65] felix/xdp_state.go 968: Processing member updates. family=4 +2024-05-08 15:23:58.599 [DEBUG][65] felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4" +2024-05-08 15:23:58.599 [DEBUG][65] felix/xdp_state.go 1798: Processing BPF actions. family="ipv4" +2024-05-08 15:23:58.599 [DEBUG][65] felix/xdp_state.go 1270: Finished processing pending diff state. 
bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} +2024-05-08 15:23:58.599 [DEBUG][65] felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4 +2024-05-08 15:23:58.599 [DEBUG][65] felix/endpoint_mgr.go 443: Reporting endpoint status. dirtyEndpoints=set.Set{} +2024-05-08 15:23:58.599 [DEBUG][65] felix/int_dataplane.go 1807: Applying dataplane updates +2024-05-08 15:23:58.598 [DEBUG][65] felix/int_dataplane.go 1773: Refreshing IP sets state +2024-05-08 15:23:58.567 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azvd9f99abc12b" +2024-05-08 15:23:58.567 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv5409bbb0f91" +2024-05-08 15:23:58.567 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azvd579a75f38c" +2024-05-08 15:23:58.567 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv99e55aec9e0" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv538491b29bd" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv702b28249ec" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv563ecfa513f" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv01da877484a" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv44171c1d230" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azvd2b7e4d3c5d" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azva64fe438af1" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv85e9113a66b" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azva58c9d511bf" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azvf710a7befe4" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv7d40c16f7e7" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv2cdbcc6462e" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azvf2744c5952c" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azvbeb91ceceff" 
+2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv1f0a8fc1b7f" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv984e44d890b" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azvc9e7220daa4" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv87bed97554f" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv9e563e1eb6f" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azveb88cda3f61" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azvfb2cefaad17" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="enP59002s1" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 957: Examining link for MTU calculation mtu=1500 name="eth0" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=65536 name="lo" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv21bccd3ab44" +2024-05-08 15:23:58.566 [DEBUG][3576126] felix/int_dataplane.go 954: Skipping interface for MTU detection mtu=1500 name="azv12848e8044b" +2024-05-08 15:23:58.543 [INFO][528208] felix/summary.go 100: Summarising 10 dataplane reconciliation loops over 1m3.4s: avg=73ms longest=540ms (resync-ipsets-v4) +2024-05-08 15:23:58.533 [DEBUG][76] felix/health.go 167: Health: live +2024-05-08 15:23:58.533 [DEBUG][76] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:58.533 [DEBUG][76] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc187042335fafad3, ext:7941524519871, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:58.533 [DEBUG][76] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc186fc61e1402111, ext:176730109, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:58.533 [DEBUG][76] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, 
timestamp:time.Time{wall:0xc1870423392852d8, ext:7941577823172, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:58.532 [DEBUG][76] felix/health.go 157: GET /liveness +2024-05-08 15:23:58.487 [DEBUG][1546] felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:58.487 [DEBUG][1546] felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:58.487 [DEBUG][1546] felix/sync_client.go 434: New message from Typha. connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, 58, 485630491, time.Local)}} type="" +2024-05-08 15:23:58.429 [INFO][80] felix/summary.go 100: Summarising 17 dataplane reconciliation loops over 1m0.5s: avg=13ms longest=145ms (resync-nat-v4) +2024-05-08 15:23:58.300 [DEBUG][104] felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:58.300 [DEBUG][104] felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:58.300 [DEBUG][104] felix/sync_client.go 434: New message from Typha. connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, 58, 292811972, time.Local)}} type="" +2024-05-08 15:23:58.224 [DEBUG][3576126] felix/health.go 167: Health: live +2024-05-08 15:23:58.224 [DEBUG][3576126] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:58.223 [DEBUG][3576126] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422c5b18d8f, ext:583520454847290, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:58.223 [DEBUG][3576126] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422c4d61543, ext:583520440464210, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:58.223 [DEBUG][3576126] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc184ca4aaf971563, ext:157755762, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:58.223 [DEBUG][3576126] felix/health.go 
157: GET /liveness +2024-05-08 15:23:58.214 [INFO][97] felix/summary.go 100: Summarising 11 dataplane reconciliation loops over 1m1.1s: avg=11ms longest=64ms (resync-nat-v4) +2024-05-08 15:23:58.169 [INFO][2333] felix/summary.go 100: Summarising 35 dataplane reconciliation loops over 1m2s: avg=12ms longest=46ms (resync-filter-v4,resync-filter-v6,resync-mangle-v4,resync-mangle-v6,update-filter-v4,update-filter-v6) +2024-05-08 15:23:58.038 [INFO][501368] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 10.6s: avg=3ms longest=3ms (resync-ipsets-v4) +2024-05-08 15:23:58.038 [DEBUG][501368] felix/int_dataplane.go 2080: Asked to reschedule. delay=6.976670278s +2024-05-08 15:23:58.038 [DEBUG][501368] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="filter" +2024-05-08 15:23:58.038 [DEBUG][501368] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="nat" +2024-05-08 15:23:58.038 [DEBUG][501368] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="mangle" +2024-05-08 15:23:58.038 [DEBUG][501368] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="filter" +2024-05-08 15:23:58.038 [DEBUG][501368] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="raw" +2024-05-08 15:23:58.038 [DEBUG][501368] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="nat" +2024-05-08 15:23:58.038 [DEBUG][501368] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="mangle" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="raw" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 643: No dirty IP sets. family="inet" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 366: Finished IPSets resync family="inet" numInconsistenciesFound=0 resyncDuration=2.234549ms +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40this-host" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40masq-ipam-pools" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40all-ipam-pools" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 589: Whitelisting IP sets. ID="all-ipam-pools" family="inet" mainName="cali40all-ipam-pools" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 589: Whitelisting IP sets. ID="this-host" family="inet" mainName="cali40this-host" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 589: Whitelisting IP sets. 
ID="masq-ipam-pools" family="inet" mainName="cali40masq-ipam-pools" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 467: Found member in dataplane canon=10.68.15.158 family="inet" member="10.68.15.158" setID="this-host" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 467: Found member in dataplane canon=127.0.0.1 family="inet" member="127.0.0.1" setID="this-host" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 467: Found member in dataplane canon=127.0.0.0 family="inet" member="127.0.0.0" setID="this-host" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40this-host" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40masq-ipam-pools" +2024-05-08 15:23:58.037 [DEBUG][501368] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40all-ipam-pools" +2024-05-08 15:23:58.035 [DEBUG][501368] felix/ipsets.go 314: Resyncing ipsets with dataplane. family="inet" +2024-05-08 15:23:58.035 [DEBUG][501368] felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4 +2024-05-08 15:23:58.035 [DEBUG][501368] felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet" +2024-05-08 15:23:58.035 [DEBUG][501368] felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4 +2024-05-08 15:23:58.035 [DEBUG][501368] felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{} +2024-05-08 15:23:58.035 [DEBUG][501368] felix/xdp_state.go 968: Processing member updates. family=4 +2024-05-08 15:23:58.035 [DEBUG][501368] felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4" +2024-05-08 15:23:58.035 [DEBUG][501368] felix/xdp_state.go 1798: Processing BPF actions. family="ipv4" +2024-05-08 15:23:58.035 [DEBUG][501368] felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} +2024-05-08 15:23:58.035 [DEBUG][501368] felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4 +2024-05-08 15:23:58.034 [DEBUG][501368] felix/endpoint_mgr.go 443: Reporting endpoint status. 
dirtyEndpoints=set.Set{} +2024-05-08 15:23:58.034 [DEBUG][501368] felix/int_dataplane.go 1807: Applying dataplane updates +2024-05-08 15:23:58.034 [DEBUG][501368] felix/int_dataplane.go 1773: Refreshing IP sets state +bird: Netlink: No route to host +bird: Netlink: No route to host +bird: Netlink: No route to host +bird: Netlink: No route to host +2024-05-08 15:23:57.975 [DEBUG][3503680] felix/health.go 167: Health: live +2024-05-08 15:23:57.975 [DEBUG][3503680] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:57.975 [DEBUG][3503680] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc187042375096a47, ext:583523243706465, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.975 [DEBUG][3503680] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc184ca4aadf24db3, ext:124751409, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.975 [DEBUG][3503680] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422c14a5fff, ext:583520375548541, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.974 [DEBUG][3503680] felix/health.go 157: GET /liveness +2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", ""} chainName="INPUT" expectedRuleIDs=[]string{"Cz_u1IQiXIMmKD4c", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter" +2024-05-08 15:23:57.969 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "tVnHkvAo15HuiPy0", "", "", "", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="filter" +2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "", "", "", "", "", "", "", "", "tVnHkvAo15HuiPy0", "", ""} chainName="OUTPUT" expectedRuleIDs=[]string{"tVnHkvAo15HuiPy0", "", "", "", "", "", "", "", "", "", "", "", "", "", ""} ipVersion=0x4 table="raw" +2024-05-08 15:23:57.942 [WARNING][56] felix/table.go 654: Detected out-of-sync inserts, marking for resync actualRuleIDs=[]string{"", "", "", "", "6gwbT8clXdHdC1b1"} chainName="PREROUTING" 
expectedRuleIDs=[]string{"6gwbT8clXdHdC1b1", "", "", "", ""} ipVersion=0x4 table="raw" +2024-05-08 15:23:57.889 [INFO][3503680] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 7.8s: avg=4ms longest=4ms (resync-ipsets-v4) +2024-05-08 15:23:57.889 [DEBUG][3503680] felix/int_dataplane.go 2080: Asked to reschedule. delay=15.912807212s +2024-05-08 15:23:57.889 [DEBUG][3503680] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="filter" +2024-05-08 15:23:57.889 [DEBUG][3503680] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="mangle" +2024-05-08 15:23:57.889 [DEBUG][3503680] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="filter" +2024-05-08 15:23:57.889 [DEBUG][3503680] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="nat" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="mangle" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="nat" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="raw" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="raw" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 643: No dirty IP sets. family="inet" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 366: Finished IPSets resync family="inet" numInconsistenciesFound=0 resyncDuration=2.105217ms +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40masq-ipam-pools" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40all-ipam-pools" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40this-host" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 589: Whitelisting IP sets. ID="this-host" family="inet" mainName="cali40this-host" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 589: Whitelisting IP sets. ID="masq-ipam-pools" family="inet" mainName="cali40masq-ipam-pools" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 589: Whitelisting IP sets. ID="all-ipam-pools" family="inet" mainName="cali40all-ipam-pools" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40masq-ipam-pools" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40all-ipam-pools" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 467: Found member in dataplane canon=127.0.0.0 family="inet" member="127.0.0.0" setID="this-host" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 467: Found member in dataplane canon=127.0.0.1 family="inet" member="127.0.0.1" setID="this-host" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 467: Found member in dataplane canon=10.68.1.166 family="inet" member="10.68.1.166" setID="this-host" +2024-05-08 15:23:57.888 [DEBUG][3503680] felix/ipsets.go 426: Parsing IP set. 
family="inet" setName="cali40this-host" +2024-05-08 15:23:57.886 [DEBUG][3503680] felix/ipsets.go 314: Resyncing ipsets with dataplane. family="inet" +2024-05-08 15:23:57.886 [DEBUG][3503680] felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4 +2024-05-08 15:23:57.886 [DEBUG][3503680] felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet" +2024-05-08 15:23:57.886 [DEBUG][3503680] felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4 +2024-05-08 15:23:57.886 [DEBUG][3503680] felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{} +2024-05-08 15:23:57.886 [DEBUG][3503680] felix/xdp_state.go 968: Processing member updates. family=4 +2024-05-08 15:23:57.886 [DEBUG][3503680] felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4" +2024-05-08 15:23:57.886 [DEBUG][3503680] felix/xdp_state.go 1798: Processing BPF actions. family="ipv4" +2024-05-08 15:23:57.886 [DEBUG][3503680] felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} +2024-05-08 15:23:57.886 [DEBUG][3503680] felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4 +2024-05-08 15:23:57.885 [DEBUG][3503680] felix/endpoint_mgr.go 443: Reporting endpoint status. dirtyEndpoints=set.Set{} +2024-05-08 15:23:57.885 [DEBUG][3503680] felix/int_dataplane.go 1807: Applying dataplane updates +2024-05-08 15:23:57.885 [DEBUG][3503680] felix/int_dataplane.go 1773: Refreshing IP sets state +2024-05-08 15:23:57.855 [DEBUG][52] felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:57.855 [DEBUG][52] felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:57.855 [DEBUG][52] felix/sync_client.go 434: New message from Typha. 
connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, 57, 864927715, time.Local)}} type="" +2024-05-08 15:23:57.701 [DEBUG][216945] felix/health.go 167: Health: ready +2024-05-08 15:23:57.701 [DEBUG][216945] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:57.701 [DEBUG][216945] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1869a51f0c73232, ext:123276733, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.701 [DEBUG][216945] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc187042180d1eac9, ext:108350318672980, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.701 [DEBUG][216945] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc187042343376ae0, ext:108357358879439, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.701 [DEBUG][216945] felix/health.go 152: GET /readiness +2024-05-08 15:23:57.679 [DEBUG][3383360] felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:57.679 [DEBUG][3383360] felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:57.679 [DEBUG][3383360] felix/sync_client.go 434: New message from Typha. 
connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, 57, 678730804, time.Local)}} type="" +2024-05-08 15:23:57.507 [DEBUG][3583983] felix/health.go 167: Health: live +2024-05-08 15:23:57.507 [DEBUG][3583983] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:57.507 [DEBUG][3583983] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc18704230db390fb, ext:583521218251528, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.506 [DEBUG][3583983] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc18704230bceaaa3, ext:583521186473136, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.506 [DEBUG][3583983] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc184ca4ad9017524, ext:407905585, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.506 [DEBUG][3583983] felix/health.go 157: GET /liveness +2024-05-08 15:23:57.482 [INFO][85] felix/summary.go 100: Summarising 11 dataplane reconciliation loops over 1m5.1s: avg=4ms longest=12ms (resync-nat-v4) +2024-05-08 15:23:57.447 [DEBUG][3596528] felix/health.go 167: Health: ready +2024-05-08 15:23:57.447 [DEBUG][3596528] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:57.447 [DEBUG][3596528] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422c1f39569, ext:583520389145641, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.447 [DEBUG][3596528] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", 
reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422bb3c74e4, ext:583520350222756, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.446 [DEBUG][3596528] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc184ca4aafc787fa, ext:158013754, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.446 [DEBUG][3596528] felix/health.go 152: GET /readiness +2024-05-08 15:23:57.428 [DEBUG][65] felix/health.go 167: Health: ready +2024-05-08 15:23:57.428 [DEBUG][65] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:57.427 [DEBUG][65] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc187042106a98bd2, ext:2441795233498, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.427 [DEBUG][65] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc18704210762abde, ext:2441807365762, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.427 [DEBUG][65] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc18701be9e16796f, ext:188248083, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.427 [DEBUG][65] felix/health.go 152: GET /readiness +2024-05-08 15:23:57.390 [DEBUG][3383360] felix/health.go 167: Health: ready +2024-05-08 15:23:57.390 [DEBUG][3383360] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:57.389 [DEBUG][3383360] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, 
latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc184ca4aafaad8a5, ext:161685994, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.389 [DEBUG][3383360] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422c00e5de5, ext:583520362901902, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.389 [DEBUG][3383360] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422ba5be65e, ext:583520341061539, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.389 [DEBUG][3383360] felix/health.go 152: GET /readiness +2024-05-08 15:23:57.366 [DEBUG][3435880] felix/health.go 167: Health: live +2024-05-08 15:23:57.366 [DEBUG][3435880] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:57.366 [DEBUG][3435880] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422c183b4f6, ext:583520381062423, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.366 [DEBUG][3435880] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422bb2a0171, ext:583520348262290, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.366 [DEBUG][3435880] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc184ca4ab029c59e, ext:163697599, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.365 [DEBUG][3435880] felix/health.go 157: GET /liveness +2024-05-08 15:23:57.289 [DEBUG][3794357] felix/health.go 167: Health: ready +2024-05-08 15:23:57.289 [DEBUG][3794357] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting 
live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:57.288 [DEBUG][3794357] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc18704221c52405b, ext:538680295268992, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.288 [DEBUG][3794357] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422c4c38bbc, ext:538682900040573, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.288 [DEBUG][3794357] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc184f61410b0c13a, ext:100135675, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.288 [DEBUG][3794357] felix/health.go 152: GET /readiness +2024-05-08 15:23:57.242 [INFO][732993] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 10.2s: avg=4ms longest=4ms (resync-ipsets-v4) +2024-05-08 15:23:57.242 [DEBUG][732993] felix/int_dataplane.go 2080: Asked to reschedule. delay=3.196499738s +2024-05-08 15:23:57.242 [DEBUG][732993] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="filter" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="mangle" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="nat" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="filter" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="mangle" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="nat" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="raw" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="raw" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/ipsets.go 643: No dirty IP sets. family="inet" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/ipsets.go 366: Finished IPSets resync family="inet" numInconsistenciesFound=0 resyncDuration=2.412832ms +2024-05-08 15:23:57.242 [DEBUG][732993] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40this-host" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40masq-ipam-pools" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/ipsets.go 607: Skipping expected Calico IP set. 
family="inet" setName="cali40all-ipam-pools" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/ipsets.go 589: Whitelisting IP sets. ID="this-host" family="inet" mainName="cali40this-host" +2024-05-08 15:23:57.242 [DEBUG][732993] felix/ipsets.go 589: Whitelisting IP sets. ID="masq-ipam-pools" family="inet" mainName="cali40masq-ipam-pools" +2024-05-08 15:23:57.241 [DEBUG][732993] felix/ipsets.go 589: Whitelisting IP sets. ID="all-ipam-pools" family="inet" mainName="cali40all-ipam-pools" +2024-05-08 15:23:57.241 [DEBUG][732993] felix/ipsets.go 467: Found member in dataplane canon=10.68.28.190 family="inet" member="10.68.28.190" setID="this-host" +2024-05-08 15:23:57.241 [DEBUG][732993] felix/ipsets.go 467: Found member in dataplane canon=127.0.0.0 family="inet" member="127.0.0.0" setID="this-host" +2024-05-08 15:23:57.241 [DEBUG][732993] felix/ipsets.go 467: Found member in dataplane canon=127.0.0.1 family="inet" member="127.0.0.1" setID="this-host" +2024-05-08 15:23:57.241 [DEBUG][732993] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40this-host" +2024-05-08 15:23:57.241 [DEBUG][732993] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40masq-ipam-pools" +2024-05-08 15:23:57.241 [DEBUG][732993] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40all-ipam-pools" +2024-05-08 15:23:57.239 [DEBUG][732993] felix/ipsets.go 314: Resyncing ipsets with dataplane. family="inet" +2024-05-08 15:23:57.239 [DEBUG][732993] felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4 +2024-05-08 15:23:57.239 [DEBUG][732993] felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet" +2024-05-08 15:23:57.239 [DEBUG][732993] felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4 +2024-05-08 15:23:57.239 [DEBUG][732993] felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{} +2024-05-08 15:23:57.239 [DEBUG][732993] felix/xdp_state.go 968: Processing member updates. family=4 +2024-05-08 15:23:57.239 [DEBUG][732993] felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4" +2024-05-08 15:23:57.239 [DEBUG][732993] felix/xdp_state.go 1798: Processing BPF actions. family="ipv4" +2024-05-08 15:23:57.239 [DEBUG][732993] felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} +2024-05-08 15:23:57.239 [DEBUG][732993] felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4 +2024-05-08 15:23:57.239 [DEBUG][732993] felix/endpoint_mgr.go 443: Reporting endpoint status. 
dirtyEndpoints=set.Set{} +2024-05-08 15:23:57.238 [DEBUG][732993] felix/int_dataplane.go 1807: Applying dataplane updates +2024-05-08 15:23:57.238 [DEBUG][732993] felix/int_dataplane.go 1773: Refreshing IP sets state +2024-05-08 15:23:57.215 [DEBUG][3794357] felix/health.go 167: Health: live +2024-05-08 15:23:57.215 [DEBUG][3794357] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:57.214 [DEBUG][3794357] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc18704221c52405b, ext:538680295268992, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.214 [DEBUG][3794357] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422c4c38bbc, ext:538682900040573, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.214 [DEBUG][3794357] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc184f61410b0c13a, ext:100135675, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.214 [DEBUG][3794357] felix/health.go 157: GET /liveness +2024-05-08 15:23:57.199 [DEBUG][88347] felix/health.go 167: Health: live +2024-05-08 15:23:57.199 [DEBUG][88347] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:57.198 [DEBUG][88347] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc184ca4ab05d2cb0, ext:169261950, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.198 [DEBUG][88347] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, 
timestamp:time.Time{wall:0xc1870422c25fa118, ext:583520397670886, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.198 [DEBUG][88347] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422bb9a7822, ext:583520357828336, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.198 [DEBUG][88347] felix/health.go 157: GET /liveness +2024-05-08 15:23:57.122 [DEBUG][990568] felix/health.go 167: Health: live +2024-05-08 15:23:57.121 [DEBUG][990568] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:57.121 [DEBUG][990568] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc187042187218ce6, ext:108350416773827, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.121 [DEBUG][990568] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422cf0aef41, ext:108355549509406, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.121 [DEBUG][990568] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1869a51f2419877, ext:140294228, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:57.120 [DEBUG][990568] felix/health.go 157: GET /liveness +2024-05-08 15:23:57.053 [INFO][216945] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 10.1s: avg=4ms longest=4ms (resync-ipsets-v4) +2024-05-08 15:23:57.053 [DEBUG][216945] felix/int_dataplane.go 2080: Asked to reschedule. delay=1.626286899s +2024-05-08 15:23:57.053 [DEBUG][216945] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="nat" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="filter" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="nat" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="mangle" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. 
ipVersion=0x4 table="filter" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="mangle" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="raw" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="raw" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/ipsets.go 643: No dirty IP sets. family="inet" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/ipsets.go 366: Finished IPSets resync family="inet" numInconsistenciesFound=0 resyncDuration=2.49683ms +2024-05-08 15:23:57.053 [DEBUG][216945] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40masq-ipam-pools" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40all-ipam-pools" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/ipsets.go 607: Skipping expected Calico IP set. family="inet" setName="cali40this-host" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/ipsets.go 589: Whitelisting IP sets. ID="this-host" family="inet" mainName="cali40this-host" +2024-05-08 15:23:57.053 [DEBUG][216945] felix/ipsets.go 589: Whitelisting IP sets. ID="masq-ipam-pools" family="inet" mainName="cali40masq-ipam-pools" +2024-05-08 15:23:57.052 [DEBUG][216945] felix/ipsets.go 589: Whitelisting IP sets. ID="all-ipam-pools" family="inet" mainName="cali40all-ipam-pools" +2024-05-08 15:23:57.052 [DEBUG][216945] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40masq-ipam-pools" +2024-05-08 15:23:57.052 [DEBUG][216945] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40all-ipam-pools" +2024-05-08 15:23:57.052 [DEBUG][216945] felix/ipsets.go 467: Found member in dataplane canon=127.0.0.1 family="inet" member="127.0.0.1" setID="this-host" +2024-05-08 15:23:57.052 [DEBUG][216945] felix/ipsets.go 467: Found member in dataplane canon=10.68.0.199 family="inet" member="10.68.0.199" setID="this-host" +2024-05-08 15:23:57.052 [DEBUG][216945] felix/ipsets.go 467: Found member in dataplane canon=127.0.0.0 family="inet" member="127.0.0.0" setID="this-host" +2024-05-08 15:23:57.052 [DEBUG][216945] felix/ipsets.go 426: Parsing IP set. family="inet" setName="cali40this-host" +2024-05-08 15:23:57.050 [DEBUG][216945] felix/ipsets.go 314: Resyncing ipsets with dataplane. family="inet" +2024-05-08 15:23:57.050 [DEBUG][216945] felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4 +2024-05-08 15:23:57.050 [DEBUG][216945] felix/ipsets.go 234: Asked to resync with the dataplane on next update. family="inet" +2024-05-08 15:23:57.050 [DEBUG][216945] felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4 +2024-05-08 15:23:57.050 [DEBUG][216945] felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{} +2024-05-08 15:23:57.050 [DEBUG][216945] felix/xdp_state.go 968: Processing member updates. family=4 +2024-05-08 15:23:57.050 [DEBUG][216945] felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4" +2024-05-08 15:23:57.050 [DEBUG][216945] felix/xdp_state.go 1798: Processing BPF actions. family="ipv4" +2024-05-08 15:23:57.050 [DEBUG][216945] felix/xdp_state.go 1270: Finished processing pending diff state. 
bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} +2024-05-08 15:23:57.050 [DEBUG][216945] felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4 +2024-05-08 15:23:57.050 [DEBUG][216945] felix/endpoint_mgr.go 443: Reporting endpoint status. dirtyEndpoints=set.Set{} +2024-05-08 15:23:57.050 [DEBUG][216945] felix/int_dataplane.go 1807: Applying dataplane updates +2024-05-08 15:23:57.049 [DEBUG][216945] felix/int_dataplane.go 1773: Refreshing IP sets state +bird: Netlink: No route to host +bird: Netlink: No route to host +2024-05-08 15:23:57.001 [INFO][60] felix/summary.go 100: Summarising 20 dataplane reconciliation loops over 1m2.9s: avg=13ms longest=171ms (resync-nat-v4) +2024-05-08 15:23:56.988 [INFO][55] felix/summary.go 100: Summarising 9 dataplane reconciliation loops over 1m2.7s: avg=15ms longest=110ms (resync-nat-v4) +2024-05-08 15:23:56.932 [DEBUG][3440577] felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:56.932 [DEBUG][3440577] felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:56.931 [DEBUG][3440577] felix/sync_client.go 434: New message from Typha. connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, 56, 932354016, time.Local)}} type="" +2024-05-08 15:23:56.624 [INFO][76] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 200ms: avg=10ms longest=10ms (resync-routes-v4,resync-routes-v4,resync-rules-v4,resync-wg) +2024-05-08 15:23:56.624 [DEBUG][76] felix/int_dataplane.go 2080: Asked to reschedule. 
delay=53.068737416s +2024-05-08 15:23:56.624 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="azv6767b9519e3" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.624 [DEBUG][76] felix/route_table.go 915: Route is correct dest=10.68.10.166/32 ifaceName="azv6767b9519e3" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.624 [DEBUG][76] felix/route_table.go 880: Processing route: 254 13 10.68.10.166/32 ifaceName="azv6767b9519e3" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.624 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="azv6767b9519e3" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.624 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="azv6767b9519e3" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.624 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="azvddd03b40b4a" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.624 [DEBUG][76] felix/route_table.go 915: Route is correct dest=10.68.10.223/32 ifaceName="azvddd03b40b4a" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.624 [DEBUG][76] felix/route_table.go 880: Processing route: 254 9 10.68.10.223/32 ifaceName="azvddd03b40b4a" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.624 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="azvddd03b40b4a" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.624 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="azvddd03b40b4a" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.624 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="azvd32f7c1c18e" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.623 [DEBUG][76] felix/route_table.go 915: Route is correct dest=10.68.10.17/32 ifaceName="azvd32f7c1c18e" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.623 [DEBUG][76] felix/route_table.go 880: Processing route: 254 7 10.68.10.17/32 ifaceName="azvd32f7c1c18e" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.623 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="azvd32f7c1c18e" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.623 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="azvd32f7c1c18e" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.623 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="azv1e0e3e8aac0" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.623 [DEBUG][76] felix/route_table.go 915: Route is correct dest=10.68.10.192/32 ifaceName="azv1e0e3e8aac0" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.623 [DEBUG][76] felix/route_table.go 880: Processing route: 254 5 10.68.10.192/32 ifaceName="azv1e0e3e8aac0" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.623 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="azv1e0e3e8aac0" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="azv1e0e3e8aac0" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface 
ifaceName="azv24bd4f90868" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 915: Route is correct dest=10.68.10.133/32 ifaceName="azv24bd4f90868" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 880: Processing route: 254 37 10.68.10.133/32 ifaceName="azv24bd4f90868" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="azv24bd4f90868" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="azv24bd4f90868" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="azv7209a4b4cbc" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 915: Route is correct dest=10.68.10.18/32 ifaceName="azv7209a4b4cbc" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 880: Processing route: 254 35 10.68.10.18/32 ifaceName="azv7209a4b4cbc" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="azv7209a4b4cbc" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="azv7209a4b4cbc" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="azvd9f11c4f109" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 915: Route is correct dest=10.68.10.207/32 ifaceName="azvd9f11c4f109" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 880: Processing route: 254 33 10.68.10.207/32 ifaceName="azvd9f11c4f109" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.622 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="azvd9f11c4f109" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.621 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="azvd9f11c4f109" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.621 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="azve1df6b75675" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.621 [DEBUG][76] felix/route_table.go 915: Route is correct dest=10.68.10.209/32 ifaceName="azve1df6b75675" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.621 [DEBUG][76] felix/route_table.go 880: Processing route: 254 17 10.68.10.209/32 ifaceName="azve1df6b75675" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.621 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="azve1df6b75675" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.621 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="azve1df6b75675" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.621 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="azv443ad95a1ab" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.620 [DEBUG][76] 
felix/route_table.go 915: Route is correct dest=10.68.10.151/32 ifaceName="azv443ad95a1ab" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.620 [DEBUG][76] felix/route_table.go 880: Processing route: 254 11 10.68.10.151/32 ifaceName="azv443ad95a1ab" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.620 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="azv443ad95a1ab" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.620 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="azv443ad95a1ab" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.620 [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface ifaceName="azv24bd4f90868" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.620 [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface ifaceName="azv7209a4b4cbc" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface ifaceName="azvd9f11c4f109" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 614: Synchronised routes on interface ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1 +2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface ifaceName="azve1df6b75675" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface ifaceName="azv6767b9519e3" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface ifaceName="azv443ad95a1ab" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 686: Reconcile against kernel programming ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1 +2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface ifaceName="azvddd03b40b4a" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 661: Syncing interface routes ifaceName="*NoOIF*" ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1 +2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface ifaceName="azvd32f7c1c18e" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 584: Flag no OIF for full re-sync +2024-05-08 15:23:56.619 [DEBUG][76] felix/route_table.go 557: Resync: found calico-owned interface ifaceName="azv1e0e3e8aac0" ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.617 [DEBUG][76] felix/wireguard.go 1503: Wireguard is disabled and does not exist ifaceName="wireguard.cali" ipVersion=0x4 +2024-05-08 15:23:56.615 [DEBUG][76] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="mangle" +2024-05-08 15:23:56.615 [DEBUG][76] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="mangle" +2024-05-08 15:23:56.615 [DEBUG][76] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. 
ipVersion=0x4 table="filter" +2024-05-08 15:23:56.615 [DEBUG][76] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="nat" +2024-05-08 15:23:56.615 [DEBUG][76] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="filter" +2024-05-08 15:23:56.615 [DEBUG][76] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="raw" +2024-05-08 15:23:56.615 [DEBUG][76] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="nat" +2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 533: Check interfaces matching regex +2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 533: Check interfaces matching regex +2024-05-08 15:23:56.615 [DEBUG][76] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. ipVersion=0x4 table="raw" +2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 654: Wireguard is not in-sync - verifying wireguard configuration is removed ipVersion=0x4 +2024-05-08 15:23:56.615 [DEBUG][76] felix/ipsets.go 643: No dirty IP sets. family="inet" +2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 652: Wireguard is not enabled, skipping sync ipVersion=0x4 +2024-05-08 15:23:56.615 [DEBUG][76] felix/route_rule.go 179: Queueing a resync of routing rules. ipVersion=4 +2024-05-08 15:23:56.615 [DEBUG][2460733] felix/health.go 167: Health: ready +2024-05-08 15:23:56.615 [DEBUG][2460733] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480: Queueing a resync of routing table. ifaceRegex="^wireguard.cali$" ipVersion=0x4 tableIndex=1 +2024-05-08 15:23:56.615 [DEBUG][76] felix/wireguard.go 605: Queueing a resync of wireguard configuration ipVersion=0x4 +2024-05-08 15:23:56.615 [DEBUG][76] felix/route_table.go 480: Queueing a resync of routing table. 
ifaceRegex="^azv.*" ipVersion=0x4 tableIndex=0 +2024-05-08 15:23:56.615 [DEBUG][2460733] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1869a51f0aebd26, ext:121042042, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:56.615 [DEBUG][2460733] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870421808718e2, ext:108350313137718, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:56.615 [DEBUG][2460733] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422c5d0a3f1, ext:108355401843625, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:56.615 [DEBUG][76] felix/xdp_state.go 1004: Updating ipsetIDsToMembers cache. family=4 +2024-05-08 15:23:56.615 [DEBUG][76] felix/xdp_state.go 1605: Getting member changes. family=4 oldMembers=map[string]set.Set[string]{} +2024-05-08 15:23:56.615 [DEBUG][2460733] felix/health.go 152: GET /readiness +2024-05-08 15:23:56.615 [DEBUG][76] felix/xdp_state.go 968: Processing member updates. family=4 +2024-05-08 15:23:56.614 [DEBUG][76] felix/xdp_state.go 1932: Finished processing BPF actions. family="ipv4" +2024-05-08 15:23:56.614 [DEBUG][76] felix/xdp_state.go 1798: Processing BPF actions. family="ipv4" +2024-05-08 15:23:56.614 [DEBUG][76] felix/xdp_state.go 1270: Finished processing pending diff state. bpfActions=intdataplane.xdpBPFActions{CreateMap:set.Typed[string]{}, RemoveMap:set.Typed[string]{}, AddToMap:map[string]map[string]uint32{}, RemoveFromMap:map[string]map[string]uint32{}, InstallXDP:set.Typed[string]{}, UninstallXDP:set.Typed[string]{}, MembersToDrop:map[string]map[string]uint32{}, MembersToAdd:map[string]map[string]uint32{}} family=4 newCS=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} +2024-05-08 15:23:56.614 [DEBUG][76] felix/xdp_state.go 1043: Processing pending diff state. cs=&intdataplane.xdpSystemState{IfaceNameToData:map[string]intdataplane.xdpIfaceData{}, XDPEligiblePolicies:map[proto.PolicyID]intdataplane.xdpRules{}} family=4 +2024-05-08 15:23:56.614 [DEBUG][76] felix/endpoint_mgr.go 443: Reporting endpoint status. dirtyEndpoints=set.Set{} +2024-05-08 15:23:56.614 [DEBUG][76] felix/int_dataplane.go 1807: Applying dataplane updates +2024-05-08 15:23:56.614 [DEBUG][76] felix/int_dataplane.go 1777: Refreshing routes +2024-05-08 15:23:56.518 [DEBUG][3880360] felix/sync_client.go 356: Pong sent to Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:56.518 [DEBUG][3880360] felix/sync_client.go 347: Ping received from Typha connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} type="" +2024-05-08 15:23:56.518 [DEBUG][3880360] felix/sync_client.go 434: New message from Typha. 
connID=0x0 connection=&discovery.Typha{Addr:"", IP:"", NodeName:(*string)(nil)} envelope=syncproto.Envelope{Message:syncproto.MsgPing{Timestamp:time.Date(2024, time.May, 8, 15, 23, 56, 513172267, time.Local)}} type="" +2024-05-08 15:23:56.504 [DEBUG][3503680] felix/health.go 167: Health: ready +2024-05-08 15:23:56.504 [DEBUG][3503680] felix/health.go 245: Calculated health summary healthResult=&health.HealthReport{Live:true, Ready:true, Detail:"+------------------+---------+----------------+-----------------+--------+\n| COMPONENT | TIMEOUT | LIVENESS | READINESS | DETAIL |\n+------------------+---------+----------------+-----------------+--------+\n| async_calc_graph | 20s | reporting live | reporting ready | |\n| felix-startup | 0s | reporting live | reporting ready | |\n| int_dataplane | 1m30s | reporting live | reporting ready | |\n+------------------+---------+----------------+-----------------+--------+"} +2024-05-08 15:23:56.504 [DEBUG][3503680] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"async_calc_graph", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:20000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422c14a5fff, ext:583520375548541, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:56.503 [DEBUG][3503680] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"int_dataplane", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:90000000000, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc1870422b936dde6, ext:583520313794048, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:56.503 [DEBUG][3503680] felix/health.go 196: Checking state of reporter reporter=&health.reporterState{name:"felix-startup", reports:health.HealthReport{Live:true, Ready:true, Detail:""}, timeout:0, latest:health.HealthReport{Live:true, Ready:true, Detail:""}, timestamp:time.Time{wall:0xc184ca4aadf24db3, ext:124751409, loc:(*time.Location)(0x4ce3aa0)}} +2024-05-08 15:23:56.503 [DEBUG][3503680] felix/health.go 152: GET /readiness +2024-05-08 15:23:56.403 [INFO][615489] felix/summary.go 100: Summarising 1 dataplane reconciliation loops over 600ms: avg=119ms longest=119ms (resync-filter-v4) +2024-05-08 15:23:56.403 [DEBUG][615489] felix/int_dataplane.go 2080: Asked to reschedule. delay=3.708014365s +2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 1263: Update ended up being no-op, skipping call to ip(6)tables-restore. ipVersion=0x4 table="filter" +2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 1233: In nftables mode, restarting transaction between updates and deletions. 
ipVersion=0x4 table="filter" +2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 699: Finished loading iptables state ipVersion=0x4 table="filter" +2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_8C_MHVnZxZL2yzVTdL" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-from-wl-dispatch-b" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_qr-cFgKHOI4CiiUEEX" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-ksa.startup.default" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-FORWARD" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.403 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-_78B28-fZujIjQTQ2aI" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-_qr-cFgKHOI4CiiUEEX" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azv6150b147a13" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-INPUT" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-from-host-endpoint" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="KUBE-KUBELET-CANARY" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="OUTPUT" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_bIstdR4kHzECSBOYzE" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-po-_egZPMuCAYhdQrSFKEdx" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azveba581ecf8a" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-to-wl-dispatch-d" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-kns.promtail-ops" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_dNlalb5riOo0HprCVK" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-wl-to-host" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-from-hep-forward" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-kns.pyroscope-ebpf" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azv5e22ba18f35" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain 
chainName="cali-fw-azveba581ecf8a" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="KUBE-SERVICES" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_78B28-fZujIjQTQ2aI" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-kns.kafka" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-kns.tempo-dev-04" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-_Cf61GtMrabGXzL475a" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="KUBE-FIREWALL" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-kns.promtail-ops" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_z2WZWMY7zWPbK7Yrdg" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-kns.flagger" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azv50e07b3b254" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.402 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azvc6bab8b4e1d" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-_vkEv04uT7uJLypDPj4" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azvd598a193ac9" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-to-wl-dispatch" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-ksa.faro.default" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-kns.insight-logs" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-_EVTW2dJnw0ngsnSIGm" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_RGYRnA6WWyiolzESnA" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azv50d262c36a0" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-kns.flagger" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-kns.etcdoperator" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azv3220e3e0e97" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-_iHiX1NeLmngDlWeIQr" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 
[DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azv174375f00d3" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azvc7d1dd1322f" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azv9f9730dbbd0" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_MlLF1ls2o2aXrWzAZc" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azv6150b147a13" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-ksa.kafka.kafka" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azv4ba102e1d04" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azv7979696b34d" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azv42b7f3149a5" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-kns.grafana-agent" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_NnrQ8SadG8nKhTI0RI" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-from-wl-dispatch-3" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="KUBE-FORWARD" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_iHiX1NeLmngDlWeIQr" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.401 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-kns.kafka" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azv3220e3e0e97" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="KUBE-EXTERNAL-SERVICES" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azv91eacc44416" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-from-wl-dispatch-6" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-kns.etcdoperator" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-kns.goldpinger" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-OUTPUT" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azv34fc9e4b538" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azvf5f02d7b58b" ipVersion=0x4 
table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azv50e07b3b254" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_Cf61GtMrabGXzL475a" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-kns.mimir-dev-10" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="KUBE-NODEPORTS" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-_6AKqcqFpZKPrxcNd6V" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azvc18d6f79e1b" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azvb7645b63ec8" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azvf24cb567061" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_JQRittDbqL-wmdleYd" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-_6AKqcqFpZKPrxcNd6V" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-to-wl-dispatch-5" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-kns.mimir-dev-10" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-to-wl-dispatch-9" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azv4ba102e1d04" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-kns.loki-dev-009" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.400 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-cidr-block" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azv6d221b2c167" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-from-wl-dispatch-9" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-to-host-endpoint" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azvb7645b63ec8" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-from-wl-dispatch" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azv42b7f3149a5" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azvdf4e2fe51f0" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain 
chainName="cali-tw-azvdf4e2fe51f0" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-kns.startup" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-to-wl-dispatch-6" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-tw-azv49368b7c0ff" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-to-wl-dispatch-f" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-to-wl-dispatch-b" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-_CnwSB8AYkQsuBjYgMA" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-kns.loki-dev-009" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-_JQRittDbqL-wmdleYd" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="INPUT" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pro-kns.grafana-agent" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-fw-azvb60dcb1a3d1" ipVersion=0x4 table="filter" +2024-05-08 15:23:56.399 [DEBUG][615489] felix/table.go 677: Skipping expected chain chainName="cali-pri-_RGYRnA6WWyiolzESnA" ipVersion=0x4 table="filter" \ No newline at end of file diff --git a/pkg/pattern/drain/testdata/custom.txt b/pkg/pattern/drain/testdata/custom.txt deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/pkg/pattern/drain/testdata/distributor-logfmt.txt b/pkg/pattern/drain/testdata/distributor-logfmt.txt new file mode 100644 index 0000000000000..a19501d55cfb3 --- /dev/null +++ b/pkg/pattern/drain/testdata/distributor-logfmt.txt @@ -0,0 +1,5000 @@ +ts=2024-05-02T12:17:25.606619951Z caller=http.go:194 level=debug traceID=54680f2e6c3a09c4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 33.598849ms" +ts=2024-05-02T12:17:25.60442226Z caller=http.go:194 level=debug traceID=73dc04ae5f732a9c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 432.924µs" +ts=2024-05-02T12:17:25.599739746Z caller=http.go:194 level=debug traceID=106fb4013356fe22 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.236233ms" +ts=2024-05-02T12:17:25.590019347Z caller=http.go:194 level=debug traceID=0bf180e96a5c7252 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.242655ms" +ts=2024-05-02T12:17:25.587911654Z caller=http.go:194 level=debug traceID=3ffa798c3efc4a02 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.955091ms" +ts=2024-05-02T12:17:25.584162903Z caller=http.go:194 level=debug traceID=521d33ad6b715205 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.670881ms" +ts=2024-05-02T12:17:25.581865959Z caller=http.go:194 level=debug traceID=27cad68ebd6282f2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.937261ms" +ts=2024-05-02T12:17:25.580616866Z caller=http.go:194 level=debug traceID=53dc1799237900ca orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
1.94478ms" +ts=2024-05-02T12:17:25.57631071Z caller=http.go:194 level=debug traceID=3ffa798c3efc4a02 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.819685ms" +ts=2024-05-02T12:17:25.555675027Z caller=http.go:194 level=debug traceID=0947dfbb4ed5c2fa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 281.36µs" +ts=2024-05-02T12:17:25.553062868Z caller=http.go:194 level=debug traceID=37756744fe3aff4b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.775078ms" +ts=2024-05-02T12:17:25.550525652Z caller=http.go:194 level=debug traceID=7bf3ac91856b5cff orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.175432ms" +ts=2024-05-02T12:17:25.550108807Z caller=http.go:194 level=debug traceID=258888958ddd31c7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.334342ms" +ts=2024-05-02T12:17:25.549668734Z caller=http.go:194 level=debug traceID=3d024309e1e19104 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.698824ms" +ts=2024-05-02T12:17:25.53387123Z caller=http.go:194 level=debug traceID=4de701e732dc069f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.450344ms" +ts=2024-05-02T12:17:25.527895527Z caller=http.go:194 level=debug traceID=0275389cdd385dd1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.346484ms" +ts=2024-05-02T12:17:25.524542043Z caller=http.go:194 level=debug traceID=61297a5f2442faad orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.735735ms" +ts=2024-05-02T12:17:25.522406939Z caller=http.go:194 level=debug traceID=344d454598815a0c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.743765ms" +ts=2024-05-02T12:17:25.517521259Z caller=http.go:194 level=debug traceID=6f5dfc14b18530ef orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.350807ms" +ts=2024-05-02T12:17:25.516421689Z caller=http.go:194 level=debug traceID=6b69c81fe51598fc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.624356ms" +ts=2024-05-02T12:17:25.506129536Z caller=http.go:194 level=debug traceID=525fdcd8e5076eb7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.315601ms" +ts=2024-05-02T12:17:25.505583304Z caller=http.go:194 level=debug traceID=6b69c81fe51598fc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.470572ms" +ts=2024-05-02T12:17:25.501874134Z caller=http.go:194 level=debug traceID=7ed9763c2de16a74 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.348238ms" +ts=2024-05-02T12:17:25.488911364Z caller=http.go:194 level=debug traceID=329bde24c5b390bd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.75338ms" +ts=2024-05-02T12:17:25.485790241Z caller=http.go:194 level=debug traceID=68e0f8d5355f3279 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.630084ms" +ts=2024-05-02T12:17:25.482050976Z caller=http.go:194 level=debug traceID=30d07bb8936f04ca orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.947136ms" +ts=2024-05-02T12:17:25.480906859Z caller=http.go:194 level=debug traceID=7b34ff86de91f4c6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.148042ms" +ts=2024-05-02T12:17:25.480739793Z caller=http.go:194 level=debug traceID=5c4923ad4ddfdef2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 334.482µs" +ts=2024-05-02T12:17:25.470892529Z caller=http.go:194 level=debug traceID=30d07bb8936f04ca orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.388469ms" +ts=2024-05-02T12:17:25.45893809Z caller=http.go:194 level=debug traceID=568c17f6345bddd3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.663408ms" +ts=2024-05-02T12:17:25.455054261Z caller=http.go:194 level=debug 
traceID=7c2ec81ebe8363c9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.140158ms" +ts=2024-05-02T12:17:25.454710287Z caller=http.go:194 level=debug traceID=70c0ad75b6fa3e63 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.970114ms" +ts=2024-05-02T12:17:25.454473244Z caller=http.go:194 level=debug traceID=3616f304ba44f994 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.569612ms" +ts=2024-05-02T12:17:25.453432581Z caller=http.go:194 level=debug traceID=6aeb6c48c503a1b6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.798535ms" +ts=2024-05-02T12:17:25.448865281Z caller=http.go:194 level=debug traceID=0d15267c7b04b1a0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.210893ms" +ts=2024-05-02T12:17:25.440848077Z caller=http.go:194 level=debug traceID=53341c9c7e939a44 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.204773ms" +ts=2024-05-02T12:17:25.440496464Z caller=http.go:194 level=debug traceID=00c0d69ca80a0098 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.780895ms" +ts=2024-05-02T12:17:25.434380893Z caller=http.go:194 level=debug traceID=11994c6c36db1677 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.243455ms" +ts=2024-05-02T12:17:25.429227263Z caller=http.go:194 level=debug traceID=00c0d69ca80a0098 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.985284ms" +ts=2024-05-02T12:17:25.426654548Z caller=http.go:194 level=debug traceID=0e57f5287d1290e6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.480223ms" +ts=2024-05-02T12:17:25.426274633Z caller=http.go:194 level=debug traceID=1712a2847747140c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.948023ms" +ts=2024-05-02T12:17:25.422786285Z caller=http.go:194 level=debug traceID=1de723b85a009185 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.568956ms" +ts=2024-05-02T12:17:25.422640918Z caller=http.go:194 level=debug traceID=11994c6c36db1677 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.847423ms" +ts=2024-05-02T12:17:25.417297532Z caller=http.go:194 level=debug traceID=33e64c27f014080c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.667706ms" +ts=2024-05-02T12:17:25.416002693Z caller=http.go:194 level=debug traceID=61fb3379fa7bce32 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.212419ms" +ts=2024-05-02T12:17:25.410088342Z caller=http.go:194 level=debug traceID=524c635ce2163d12 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.766328ms" +ts=2024-05-02T12:17:25.402149099Z caller=http.go:194 level=debug traceID=2bdfcd34cc3efe13 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.736005ms" +ts=2024-05-02T12:17:25.400494146Z caller=http.go:194 level=debug traceID=554f5aa7c7f32062 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.10752ms" +ts=2024-05-02T12:17:25.396544858Z caller=http.go:194 level=debug traceID=6b4e4366e4325cbe orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.087951ms" +ts=2024-05-02T12:17:25.39563361Z caller=http.go:194 level=debug traceID=75ab4d926b3f0f26 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.879492ms" +ts=2024-05-02T12:17:25.390382475Z caller=http.go:194 level=debug traceID=7d85afd7efc78f0f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.044812ms" +ts=2024-05-02T12:17:25.390221904Z caller=http.go:194 level=debug traceID=3a4d0c81b1775b96 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.182788ms" +ts=2024-05-02T12:17:25.389502838Z caller=http.go:194 level=debug traceID=554f5aa7c7f32062 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
2.39152ms" +ts=2024-05-02T12:17:25.378820411Z caller=http.go:194 level=debug traceID=374fb94594bc0ed9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.136578ms" +ts=2024-05-02T12:17:25.378044909Z caller=http.go:194 level=debug traceID=342fa828fc52cd87 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.141486ms" +ts=2024-05-02T12:17:25.376410304Z caller=http.go:194 level=debug traceID=328ad41d648b83d5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.877644ms" +ts=2024-05-02T12:17:25.375179998Z caller=http.go:194 level=debug traceID=242bb7178fef7d5f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.809736ms" +ts=2024-05-02T12:17:25.373741039Z caller=http.go:194 level=debug traceID=1a18ef57a73cb998 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.873323ms" +ts=2024-05-02T12:17:25.37270492Z caller=http.go:194 level=debug traceID=58503b3bc96ce571 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 17.201971ms" +ts=2024-05-02T12:17:25.369200732Z caller=http.go:194 level=debug traceID=44f7b7bda247778f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.045912ms" +ts=2024-05-02T12:17:25.368779217Z caller=http.go:194 level=debug traceID=2b68ae1550924c3e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.886975ms" +ts=2024-05-02T12:17:25.36594502Z caller=http.go:194 level=debug traceID=328ad41d648b83d5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.763527ms" +ts=2024-05-02T12:17:25.365489954Z caller=http.go:194 level=debug traceID=374fb94594bc0ed9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.868503ms" +ts=2024-05-02T12:17:25.362456362Z caller=http.go:194 level=debug traceID=242bb7178fef7d5f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.90417ms" +ts=2024-05-02T12:17:25.360990022Z caller=http.go:194 level=debug traceID=58503b3bc96ce571 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.69141ms" +ts=2024-05-02T12:17:25.359548713Z caller=http.go:194 level=debug traceID=6b9e36c6d899d6aa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 370.369µs" +ts=2024-05-02T12:17:25.353606005Z caller=http.go:194 level=debug traceID=3a8f76451d65954d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.641171ms" +ts=2024-05-02T12:17:25.351663731Z caller=http.go:194 level=debug traceID=13f49d458faea016 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.741912ms" +ts=2024-05-02T12:17:25.340054788Z caller=http.go:194 level=debug traceID=13f49d458faea016 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.863733ms" +ts=2024-05-02T12:17:25.339185277Z caller=http.go:194 level=debug traceID=51431a988cb88485 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.452513ms" +ts=2024-05-02T12:17:25.335989896Z caller=http.go:194 level=debug traceID=22fde10e9c5dddd1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.340477ms" +ts=2024-05-02T12:17:25.332200903Z caller=http.go:194 level=debug traceID=4dabdd1f7ef15952 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 312.633µs" +ts=2024-05-02T12:17:25.330527467Z caller=http.go:194 level=debug traceID=2981d65f6b98934e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.809879ms" +ts=2024-05-02T12:17:25.322152999Z caller=http.go:194 level=debug traceID=385da58d71914731 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.745326ms" +ts=2024-05-02T12:17:25.320318877Z caller=http.go:194 level=debug traceID=2d37532ae86c83c3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.888766ms" +ts=2024-05-02T12:17:25.312386307Z caller=http.go:194 level=debug 
traceID=385da58d71914731 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.169191ms" +ts=2024-05-02T12:17:25.308642313Z caller=http.go:194 level=debug traceID=2d37532ae86c83c3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.01813ms" +ts=2024-05-02T12:17:25.304603288Z caller=http.go:194 level=debug traceID=3d298955948d7e04 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.925404ms" +ts=2024-05-02T12:17:25.302491784Z caller=http.go:194 level=debug traceID=04c62be61d0da581 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 299.472µs" +ts=2024-05-02T12:17:25.299392259Z caller=http.go:194 level=debug traceID=15f61edb6287abd5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.007979ms" +ts=2024-05-02T12:17:25.29263937Z caller=http.go:194 level=debug traceID=1d91497117ca4ccb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.873257ms" +ts=2024-05-02T12:17:25.291902161Z caller=http.go:194 level=debug traceID=04cd746159d2f6b9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.866818ms" +ts=2024-05-02T12:17:25.291277667Z caller=http.go:194 level=debug traceID=04c62be61d0da581 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 403.85µs" +ts=2024-05-02T12:17:25.287982027Z caller=http.go:194 level=debug traceID=15f61edb6287abd5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.270307ms" +ts=2024-05-02T12:17:25.274111578Z caller=http.go:194 level=debug traceID=7c25b8b98fe1d1ab orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.817682ms" +ts=2024-05-02T12:17:25.274000456Z caller=http.go:194 level=debug traceID=6d08f732b60d979c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.233881ms" +ts=2024-05-02T12:17:25.270771922Z caller=http.go:194 level=debug traceID=2129a2c07245b346 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 18.233272ms" +ts=2024-05-02T12:17:25.26685539Z caller=http.go:194 level=debug traceID=2fa7d411a4da0531 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 15.558906ms" +ts=2024-05-02T12:17:25.266393197Z caller=http.go:194 level=debug traceID=72657f3fcbbe4385 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 16.018894ms" +ts=2024-05-02T12:17:25.26374617Z caller=http.go:194 level=debug traceID=61c7319050c2b325 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.707939ms" +ts=2024-05-02T12:17:25.26335051Z caller=http.go:194 level=debug traceID=6d08f732b60d979c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.676556ms" +ts=2024-05-02T12:17:25.262501145Z caller=http.go:194 level=debug traceID=7c25b8b98fe1d1ab orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.001743ms" +ts=2024-05-02T12:17:25.259899617Z caller=http.go:194 level=debug traceID=2129a2c07245b346 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 36.854684ms" +ts=2024-05-02T12:17:25.258059664Z caller=http.go:194 level=debug traceID=30866716538fcb6b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.27679ms" +ts=2024-05-02T12:17:25.255880309Z caller=http.go:194 level=debug traceID=273746706cb04509 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.086707ms" +ts=2024-05-02T12:17:25.252406171Z caller=http.go:194 level=debug traceID=716b25ef921e1246 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.573873ms" +ts=2024-05-02T12:17:25.252123429Z caller=http.go:194 level=debug traceID=4510a8d04d97c9c6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.534608ms" +ts=2024-05-02T12:17:25.250116614Z caller=http.go:194 level=debug traceID=4d824ab51d503786 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
2.821069ms" +ts=2024-05-02T12:17:25.242540247Z caller=http.go:194 level=debug traceID=10fc09ad575c528b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.966328ms" +ts=2024-05-02T12:17:25.241481534Z caller=http.go:194 level=debug traceID=4bfd733bd667b3c2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.060818ms" +ts=2024-05-02T12:17:25.240295978Z caller=http.go:194 level=debug traceID=12f5dc41fd203e42 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.905532ms" +ts=2024-05-02T12:17:25.237696764Z caller=http.go:194 level=debug traceID=4510a8d04d97c9c6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.684943ms" +ts=2024-05-02T12:17:25.237158097Z caller=http.go:194 level=debug traceID=6d65e7585e64ed79 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.540363ms" +ts=2024-05-02T12:17:25.236656041Z caller=http.go:194 level=debug traceID=5443dc3284576117 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.691179ms" +ts=2024-05-02T12:17:25.230417851Z caller=http.go:194 level=debug traceID=10fc09ad575c528b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.661997ms" +ts=2024-05-02T12:17:25.228033985Z caller=http.go:194 level=debug traceID=12f5dc41fd203e42 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.192134ms" +ts=2024-05-02T12:17:25.227116543Z caller=http.go:194 level=debug traceID=1a41e863f0299966 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.475404ms" +ts=2024-05-02T12:17:25.225387148Z caller=http.go:194 level=debug traceID=759d91f404ddbf90 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.127119ms" +ts=2024-05-02T12:17:25.224686939Z caller=http.go:194 level=debug traceID=358a955e06ff7a1e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.333456ms" +ts=2024-05-02T12:17:25.222599977Z caller=http.go:194 level=debug traceID=02c61109bf80b86c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.994519ms" +ts=2024-05-02T12:17:25.219717152Z caller=http.go:194 level=debug traceID=219da8121a98d774 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.562883ms" +ts=2024-05-02T12:17:25.218726825Z caller=http.go:194 level=debug traceID=114e2d4ea2e79363 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 268.144µs" +ts=2024-05-02T12:17:25.213902825Z caller=http.go:194 level=debug traceID=358a955e06ff7a1e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.829017ms" +ts=2024-05-02T12:17:25.207535241Z caller=http.go:194 level=debug traceID=114e2d4ea2e79363 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 346.57µs" +ts=2024-05-02T12:17:25.205017741Z caller=http.go:194 level=debug traceID=299a72efc10e760e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.517527ms" +ts=2024-05-02T12:17:25.200219105Z caller=http.go:194 level=debug traceID=673fedc1d4bfac8d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.853661ms" +ts=2024-05-02T12:17:25.196553084Z caller=http.go:194 level=debug traceID=271dbfb5461d139a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.80007ms" +ts=2024-05-02T12:17:25.195698321Z caller=http.go:194 level=debug traceID=19c498f9dc882de6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.475172ms" +ts=2024-05-02T12:17:25.192764319Z caller=http.go:194 level=debug traceID=0d082129227f5c39 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.010318ms" +ts=2024-05-02T12:17:25.191727949Z caller=http.go:194 level=debug traceID=6d2b385a1e7c2591 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.177096ms" +ts=2024-05-02T12:17:25.19028956Z caller=http.go:194 level=debug 
traceID=7e7678a264122473 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 517.457µs" +ts=2024-05-02T12:17:25.188474581Z caller=http.go:194 level=debug traceID=2f81de21fa03e525 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.350682ms" +ts=2024-05-02T12:17:25.187861967Z caller=http.go:194 level=debug traceID=673fedc1d4bfac8d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.073588ms" +ts=2024-05-02T12:17:25.184166375Z caller=http.go:194 level=debug traceID=54aaabad6c061c73 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.462756ms" +ts=2024-05-02T12:17:25.180675195Z caller=http.go:194 level=debug traceID=19c498f9dc882de6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.208702ms" +ts=2024-05-02T12:17:25.177964744Z caller=http.go:194 level=debug traceID=0778ddfaf52451dd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.083798ms" +ts=2024-05-02T12:17:25.177954956Z caller=http.go:194 level=debug traceID=1bd26c2a279ecdf0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.382906ms" +ts=2024-05-02T12:17:25.177430157Z caller=http.go:194 level=debug traceID=2f81de21fa03e525 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.980057ms" +ts=2024-05-02T12:17:25.176543026Z caller=http.go:194 level=debug traceID=54aaabad6c061c73 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.608515ms" +ts=2024-05-02T12:17:25.169384529Z caller=http.go:194 level=debug traceID=6b73e52c27732c94 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.534047ms" +ts=2024-05-02T12:17:25.169205187Z caller=http.go:194 level=debug traceID=1bd26c2a279ecdf0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.645198ms" +ts=2024-05-02T12:17:25.151203438Z caller=http.go:194 level=debug traceID=11807acd75fda9ba orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.108617ms" +ts=2024-05-02T12:17:25.149810653Z caller=http.go:194 level=debug traceID=32ddcc537379d808 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.778942ms" +ts=2024-05-02T12:17:25.147356067Z caller=http.go:194 level=debug traceID=631e597bc29ad37c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.083738ms" +ts=2024-05-02T12:17:25.146919008Z caller=http.go:194 level=debug traceID=62a4e915355768a4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.301674ms" +ts=2024-05-02T12:17:25.145733394Z caller=http.go:194 level=debug traceID=723c6a28928d1a87 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.600184ms" +ts=2024-05-02T12:17:25.141951223Z caller=http.go:194 level=debug traceID=02c4d818075748ef orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.027285ms" +ts=2024-05-02T12:17:25.141225785Z caller=http.go:194 level=debug traceID=11807acd75fda9ba orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.245273ms" +ts=2024-05-02T12:17:25.137458264Z caller=http.go:194 level=debug traceID=11c26c1e2ee0439d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 238.153µs" +ts=2024-05-02T12:17:25.137515258Z caller=http.go:194 level=debug traceID=77aa21b5f8c0dc1e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.402598ms" +ts=2024-05-02T12:17:25.137382293Z caller=http.go:194 level=debug traceID=57ee050167fbbf43 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.601833ms" +ts=2024-05-02T12:17:25.136472123Z caller=http.go:194 level=debug traceID=631e597bc29ad37c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.901775ms" +ts=2024-05-02T12:17:25.135914542Z caller=http.go:194 level=debug traceID=3b8c2ee3a6b9bc6f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
446.883µs" +ts=2024-05-02T12:17:25.134844755Z caller=http.go:194 level=debug traceID=723c6a28928d1a87 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.941005ms" +ts=2024-05-02T12:17:25.13295049Z caller=http.go:194 level=debug traceID=6082e8e26d7276bc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.65373ms" +ts=2024-05-02T12:17:25.132927567Z caller=http.go:194 level=debug traceID=124550f5bb19023f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.092577ms" +ts=2024-05-02T12:17:25.129564121Z caller=http.go:194 level=debug traceID=02c4d818075748ef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.89513ms" +ts=2024-05-02T12:17:25.126220929Z caller=http.go:194 level=debug traceID=193c64c436b4d0b4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.78022ms" +ts=2024-05-02T12:17:25.124876561Z caller=http.go:194 level=debug traceID=749a4671628b0085 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.313642ms" +ts=2024-05-02T12:17:25.123653029Z caller=http.go:194 level=debug traceID=4d722aa9810f0b9c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.361609ms" +ts=2024-05-02T12:17:25.121343887Z caller=http.go:194 level=debug traceID=124550f5bb19023f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.26595ms" +ts=2024-05-02T12:17:25.118144463Z caller=http.go:194 level=debug traceID=0a9221598ce9efc5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.107733ms" +ts=2024-05-02T12:17:25.11310829Z caller=http.go:194 level=debug traceID=749a4671628b0085 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.35369ms" +ts=2024-05-02T12:17:25.11163121Z caller=http.go:194 level=debug traceID=4c2f4a5dde85f1d1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.196818ms" +ts=2024-05-02T12:17:25.110983362Z caller=http.go:194 level=debug traceID=5e89c44e4b85ced2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.756756ms" +ts=2024-05-02T12:17:25.10916612Z caller=http.go:194 level=debug traceID=064b49ce1359cfa7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.739769ms" +ts=2024-05-02T12:17:25.107798329Z caller=http.go:194 level=debug traceID=0a9221598ce9efc5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.001271ms" +ts=2024-05-02T12:17:25.106321322Z caller=http.go:194 level=debug traceID=2e3846837e58823a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.32005ms" +ts=2024-05-02T12:17:25.100135398Z caller=http.go:194 level=debug traceID=4c2f4a5dde85f1d1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.853506ms" +ts=2024-05-02T12:17:25.098400182Z caller=http.go:194 level=debug traceID=0247e7d2e74bd5a3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.971389ms" +ts=2024-05-02T12:17:25.095949019Z caller=http.go:194 level=debug traceID=6a098319c2ab76eb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.695237ms" +ts=2024-05-02T12:17:25.092572888Z caller=http.go:194 level=debug traceID=5d0ed1bb7909c867 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.784855ms" +ts=2024-05-02T12:17:25.0896205Z caller=http.go:194 level=debug traceID=4a7fad7923eb0a7f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.775812ms" +ts=2024-05-02T12:17:25.088321414Z caller=http.go:194 level=debug traceID=751eb258be755a08 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 323.622µs" +ts=2024-05-02T12:17:25.087207799Z caller=http.go:194 level=debug traceID=1cc4a7a25ab82bcc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.273735ms" +ts=2024-05-02T12:17:25.087235577Z caller=http.go:194 level=debug 
traceID=3d8a3b99dd14c2b8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.257457ms" +ts=2024-05-02T12:17:25.085561241Z caller=http.go:194 level=debug traceID=7a39e17615aadfe0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.823859ms" +ts=2024-05-02T12:17:25.084493976Z caller=http.go:194 level=debug traceID=7d056c47b35d7178 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 19.583125ms" +ts=2024-05-02T12:17:25.083414216Z caller=http.go:194 level=debug traceID=3307aea890016f1a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.979572ms" +ts=2024-05-02T12:17:25.082542833Z caller=http.go:194 level=debug traceID=6a098319c2ab76eb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.344275ms" +ts=2024-05-02T12:17:25.081256938Z caller=http.go:194 level=debug traceID=724dda9570272c28 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 919.387µs" +ts=2024-05-02T12:17:25.077137195Z caller=http.go:194 level=debug traceID=0d35ab7d023ec5b7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.958796ms" +ts=2024-05-02T12:17:25.077092076Z caller=http.go:194 level=debug traceID=751eb258be755a08 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 311.895µs" +ts=2024-05-02T12:17:25.076119725Z caller=http.go:194 level=debug traceID=4a7fad7923eb0a7f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.204224ms" +ts=2024-05-02T12:17:25.074731893Z caller=http.go:194 level=debug traceID=1cc4a7a25ab82bcc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.385343ms" +ts=2024-05-02T12:17:25.074259967Z caller=http.go:194 level=debug traceID=7a39e17615aadfe0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.953776ms" +ts=2024-05-02T12:17:25.071398953Z caller=http.go:194 level=debug traceID=36044192dfd08b56 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.163352ms" +ts=2024-05-02T12:17:25.070157729Z caller=http.go:194 level=debug traceID=5f23e0682b44d5a0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.041019ms" +ts=2024-05-02T12:17:25.069460244Z caller=http.go:194 level=debug traceID=7d056c47b35d7178 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 20.746966ms" +ts=2024-05-02T12:17:25.067567332Z caller=http.go:194 level=debug traceID=78ad4e05990798a9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.950049ms" +ts=2024-05-02T12:17:25.066063987Z caller=http.go:194 level=debug traceID=0d35ab7d023ec5b7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.831738ms" +ts=2024-05-02T12:17:25.065286679Z caller=http.go:194 level=debug traceID=34621c0fb1910149 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.547263ms" +ts=2024-05-02T12:17:25.064942512Z caller=http.go:194 level=debug traceID=1fbf9dbc4d1dbea0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.904344ms" +ts=2024-05-02T12:17:25.063200621Z caller=http.go:194 level=debug traceID=14efa5236129de3f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 350.928µs" +ts=2024-05-02T12:17:25.058828857Z caller=http.go:194 level=debug traceID=5f23e0682b44d5a0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.881212ms" +ts=2024-05-02T12:17:25.058206823Z caller=http.go:194 level=debug traceID=03b41eca02d8006b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.01503ms" +ts=2024-05-02T12:17:25.057508372Z caller=http.go:194 level=debug traceID=040648ac1d683ebc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.198362ms" +ts=2024-05-02T12:17:25.057534889Z caller=http.go:194 level=debug traceID=51b0c6ac0a09dfd4 orgID=3648 msg="POST /push.v1.PusherService/Push 
(200) 9.180401ms" +ts=2024-05-02T12:17:25.057592242Z caller=http.go:194 level=debug traceID=3d57ceaea53592ed orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.684716ms" +ts=2024-05-02T12:17:25.055196783Z caller=http.go:194 level=debug traceID=117d39e46866773c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.476084ms" +ts=2024-05-02T12:17:25.055179717Z caller=http.go:194 level=debug traceID=2755218125096949 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.741775ms" +ts=2024-05-02T12:17:25.05426845Z caller=http.go:194 level=debug traceID=6c9cce3f604e4f1a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.71631ms" +ts=2024-05-02T12:17:25.054093106Z caller=http.go:194 level=debug traceID=09dc0aeb81bf025e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.447323ms" +ts=2024-05-02T12:17:25.052742286Z caller=http.go:194 level=debug traceID=34621c0fb1910149 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.352717ms" +ts=2024-05-02T12:17:25.047222331Z caller=http.go:194 level=debug traceID=5781ed44e723f24b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.165895ms" +ts=2024-05-02T12:17:25.046058334Z caller=http.go:194 level=debug traceID=20a1c5cee4557881 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.895485ms" +ts=2024-05-02T12:17:25.044974184Z caller=http.go:194 level=debug traceID=580969d52b3510db orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.638702ms" +ts=2024-05-02T12:17:25.044043496Z caller=http.go:194 level=debug traceID=2755218125096949 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.137306ms" +ts=2024-05-02T12:17:25.042910855Z caller=http.go:194 level=debug traceID=09dc0aeb81bf025e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.463855ms" +ts=2024-05-02T12:17:25.042265669Z caller=http.go:194 level=debug traceID=796d9760b8713b64 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.073863ms" +ts=2024-05-02T12:17:25.038889835Z caller=http.go:194 level=debug traceID=57a7aad1355931c2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.447236ms" +ts=2024-05-02T12:17:25.038835794Z caller=http.go:194 level=debug traceID=57f77551e9d72670 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.908715ms" +ts=2024-05-02T12:17:25.038088152Z caller=http.go:194 level=debug traceID=7283c5742827d73d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.315848ms" +ts=2024-05-02T12:17:25.033716575Z caller=http.go:194 level=debug traceID=5b965e2d08ba074b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.638253ms" +ts=2024-05-02T12:17:25.033026058Z caller=http.go:194 level=debug traceID=580969d52b3510db orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.442631ms" +ts=2024-05-02T12:17:25.030435113Z caller=http.go:194 level=debug traceID=7125c92248d558c5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.210372ms" +ts=2024-05-02T12:17:25.029583883Z caller=http.go:194 level=debug traceID=66a6d81a592056ee orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.912759ms" +ts=2024-05-02T12:17:25.029428491Z caller=http.go:194 level=debug traceID=02360b17810f7083 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.898731ms" +ts=2024-05-02T12:17:25.027766364Z caller=http.go:194 level=debug traceID=57a7aad1355931c2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.366872ms" +ts=2024-05-02T12:17:25.025226806Z caller=http.go:194 level=debug traceID=563ab6742ed8f29a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.996728ms" +ts=2024-05-02T12:17:25.024623529Z caller=http.go:194 
level=debug traceID=1aaea9035dea8ab6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.740339ms" +ts=2024-05-02T12:17:25.024220599Z caller=http.go:194 level=debug traceID=5b965e2d08ba074b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.41451ms" +ts=2024-05-02T12:17:25.023313149Z caller=http.go:194 level=debug traceID=0299c0b459248e11 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.319599ms" +ts=2024-05-02T12:17:25.021474167Z caller=http.go:194 level=debug traceID=0f384b22781073c9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.015223ms" +ts=2024-05-02T12:17:25.021101925Z caller=http.go:194 level=debug traceID=57d135fb16106a74 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.423423ms" +ts=2024-05-02T12:17:25.019867812Z caller=http.go:194 level=debug traceID=02360b17810f7083 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.441254ms" +ts=2024-05-02T12:17:25.018632649Z caller=http.go:194 level=debug traceID=0d60503054f0d330 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.043051ms" +ts=2024-05-02T12:17:25.018345462Z caller=http.go:194 level=debug traceID=04985198f4c558c8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.192365ms" +ts=2024-05-02T12:17:25.016509452Z caller=http.go:194 level=debug traceID=3491f9b56b12f570 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.524827ms" +ts=2024-05-02T12:17:25.015643761Z caller=http.go:194 level=debug traceID=65a0ecfb60b86f89 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.365281ms" +ts=2024-05-02T12:17:25.014364403Z caller=http.go:194 level=debug traceID=563ab6742ed8f29a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.937982ms" +ts=2024-05-02T12:17:25.014196491Z caller=http.go:194 level=debug traceID=1a702c83a1165d4e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.784485ms" +ts=2024-05-02T12:17:25.012488546Z caller=http.go:194 level=debug traceID=2c1dbe6fc6e418e1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.20489ms" +ts=2024-05-02T12:17:25.011940417Z caller=http.go:194 level=debug traceID=768958b83fa027db orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.145852ms" +ts=2024-05-02T12:17:25.011755446Z caller=http.go:194 level=debug traceID=702817f9c7366966 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.609886ms" +ts=2024-05-02T12:17:25.01155224Z caller=http.go:194 level=debug traceID=71827375a2b05a5c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.090737ms" +ts=2024-05-02T12:17:25.010695082Z caller=http.go:194 level=debug traceID=0f384b22781073c9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.65598ms" +ts=2024-05-02T12:17:25.009442122Z caller=http.go:194 level=debug traceID=6b63973452643eed orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.090037ms" +ts=2024-05-02T12:17:25.008815089Z caller=http.go:194 level=debug traceID=3252cf1d7907f3d4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.520453ms" +ts=2024-05-02T12:17:25.007248483Z caller=http.go:194 level=debug traceID=1ce4c0d622861a03 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.124416ms" +ts=2024-05-02T12:17:25.006511799Z caller=http.go:194 level=debug traceID=7a003fe3c5dab553 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 16.401449ms" +ts=2024-05-02T12:17:25.00489163Z caller=http.go:194 level=debug traceID=6f465e6cf0fab257 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.609347ms" +ts=2024-05-02T12:17:25.004491458Z caller=http.go:194 level=debug traceID=65a0ecfb60b86f89 orgID=1218 msg="POST 
/push.v1.PusherService/Push (200) 1.687456ms" +ts=2024-05-02T12:17:25.003746641Z caller=http.go:194 level=debug traceID=451be6b771aaecec orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.601263ms" +ts=2024-05-02T12:17:25.003102115Z caller=http.go:194 level=debug traceID=1a702c83a1165d4e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.961392ms" +ts=2024-05-02T12:17:25.002908698Z caller=http.go:194 level=debug traceID=7cece89f8d8c3ccb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.421315ms" +ts=2024-05-02T12:17:25.001772591Z caller=http.go:194 level=debug traceID=702817f9c7366966 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.853171ms" +ts=2024-05-02T12:17:24.998947619Z caller=http.go:194 level=debug traceID=624b6db15cbe9d1c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.01257ms" +ts=2024-05-02T12:17:24.998576116Z caller=http.go:194 level=debug traceID=71827375a2b05a5c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.362255ms" +ts=2024-05-02T12:17:24.998524913Z caller=http.go:194 level=debug traceID=15f8265bf8e9f46a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.142795ms" +ts=2024-05-02T12:17:24.997983929Z caller=http.go:194 level=debug traceID=6b63973452643eed orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.504991ms" +ts=2024-05-02T12:17:24.997355803Z caller=http.go:194 level=debug traceID=7806c1fa62485b94 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.775275ms" +ts=2024-05-02T12:17:24.995271472Z caller=http.go:194 level=debug traceID=65a4824f27be6e9e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.342369ms" +ts=2024-05-02T12:17:24.993725177Z caller=http.go:194 level=debug traceID=7a003fe3c5dab553 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.615809ms" +ts=2024-05-02T12:17:24.9932366Z caller=http.go:194 level=debug traceID=34f758b32e32918e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.892163ms" +ts=2024-05-02T12:17:24.99296215Z caller=http.go:194 level=debug traceID=7cece89f8d8c3ccb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.371736ms" +ts=2024-05-02T12:17:24.992756591Z caller=http.go:194 level=debug traceID=7aba37aee8ec69ef orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.166292ms" +ts=2024-05-02T12:17:24.990788245Z caller=http.go:194 level=debug traceID=2ded777361b2498e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.480514ms" +ts=2024-05-02T12:17:24.99034894Z caller=http.go:194 level=debug traceID=03ae889caa0cd19b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.834156ms" +ts=2024-05-02T12:17:24.990358399Z caller=http.go:194 level=debug traceID=26bcca013b4a3e06 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.131728ms" +ts=2024-05-02T12:17:24.989715393Z caller=http.go:194 level=debug traceID=20ec015b02659997 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.712501ms" +ts=2024-05-02T12:17:24.988331183Z caller=http.go:194 level=debug traceID=164100c0b34b211c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.908646ms" +ts=2024-05-02T12:17:24.988186517Z caller=http.go:194 level=debug traceID=31f28cd10663b731 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.039398ms" +ts=2024-05-02T12:17:24.987927522Z caller=http.go:194 level=debug traceID=15f8265bf8e9f46a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.572798ms" +ts=2024-05-02T12:17:24.987049482Z caller=http.go:194 level=debug traceID=5ab1dbbd305db35b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.625194ms" +ts=2024-05-02T12:17:24.986811322Z 
caller=http.go:194 level=debug traceID=624b6db15cbe9d1c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.395134ms" +ts=2024-05-02T12:17:24.985305128Z caller=http.go:194 level=debug traceID=12c7b7b0e6d5fb16 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 313.062µs" +ts=2024-05-02T12:17:24.984596961Z caller=http.go:194 level=debug traceID=130e92b67a2231a0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.404779ms" +ts=2024-05-02T12:17:24.982836167Z caller=http.go:194 level=debug traceID=7e258f652b01746b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.838657ms" +ts=2024-05-02T12:17:24.982060167Z caller=http.go:194 level=debug traceID=56344fdf3a879070 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 323.081µs" +ts=2024-05-02T12:17:24.981734585Z caller=http.go:194 level=debug traceID=7aba37aee8ec69ef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.327777ms" +ts=2024-05-02T12:17:24.981655688Z caller=http.go:194 level=debug traceID=55732940166f1bcd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.694556ms" +ts=2024-05-02T12:17:24.980933611Z caller=http.go:194 level=debug traceID=3725f2597690f982 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.827282ms" +ts=2024-05-02T12:17:24.980381591Z caller=http.go:194 level=debug traceID=03ae889caa0cd19b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.631729ms" +ts=2024-05-02T12:17:24.979775572Z caller=http.go:194 level=debug traceID=2ded777361b2498e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.545148ms" +ts=2024-05-02T12:17:24.977529861Z caller=http.go:194 level=debug traceID=164100c0b34b211c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.144681ms" +ts=2024-05-02T12:17:24.975651163Z caller=http.go:194 level=debug traceID=46ff24d6cb3c801d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.536073ms" +ts=2024-05-02T12:17:24.975583607Z caller=http.go:194 level=debug traceID=56de53b5cb5ac22b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.398214ms" +ts=2024-05-02T12:17:24.973442016Z caller=http.go:194 level=debug traceID=5e938ce39f4fcae5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.421423ms" +ts=2024-05-02T12:17:24.973318137Z caller=http.go:194 level=debug traceID=5897bcf75429d71a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.652144ms" +ts=2024-05-02T12:17:24.972256453Z caller=http.go:194 level=debug traceID=025b887747932574 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.997307ms" +ts=2024-05-02T12:17:24.971902114Z caller=http.go:194 level=debug traceID=60cb63ac8f7c6d9c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.354714ms" +ts=2024-05-02T12:17:24.97149513Z caller=http.go:194 level=debug traceID=7e258f652b01746b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.745018ms" +ts=2024-05-02T12:17:24.971543387Z caller=http.go:194 level=debug traceID=78e5468c4cc9e6ae orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.634977ms" +ts=2024-05-02T12:17:24.971432476Z caller=http.go:194 level=debug traceID=3df966096bf0f05c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.14924ms" +ts=2024-05-02T12:17:24.970677944Z caller=http.go:194 level=debug traceID=56344fdf3a879070 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 316.639µs" +ts=2024-05-02T12:17:24.970157576Z caller=http.go:194 level=debug traceID=55732940166f1bcd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.792676ms" +ts=2024-05-02T12:17:24.969378203Z caller=http.go:194 level=debug traceID=5f1881837fbb6df8 orgID=1218 msg="POST 
/push.v1.PusherService/Push (200) 2.280959ms" +ts=2024-05-02T12:17:24.968817855Z caller=http.go:194 level=debug traceID=043ffbe28a33766f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.964892ms" +ts=2024-05-02T12:17:24.968166503Z caller=http.go:194 level=debug traceID=5838b9ebe4543c9f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.822473ms" +ts=2024-05-02T12:17:24.967718627Z caller=http.go:194 level=debug traceID=15b1b48fa8be8e92 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.469586ms" +ts=2024-05-02T12:17:24.966910931Z caller=http.go:194 level=debug traceID=07494dd7a294daa9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.723196ms" +ts=2024-05-02T12:17:24.966677887Z caller=http.go:194 level=debug traceID=64f4827ea70c307b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.708363ms" +ts=2024-05-02T12:17:24.966043646Z caller=http.go:194 level=debug traceID=0341017e02cd125a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.201641ms" +ts=2024-05-02T12:17:24.964997317Z caller=http.go:194 level=debug traceID=46ff24d6cb3c801d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.795266ms" +ts=2024-05-02T12:17:24.964227361Z caller=http.go:194 level=debug traceID=2e85a0d8cfff540b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.978511ms" +ts=2024-05-02T12:17:24.962861767Z caller=http.go:194 level=debug traceID=791b2a99df7a1113 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.861254ms" +ts=2024-05-02T12:17:24.962329482Z caller=http.go:194 level=debug traceID=56de53b5cb5ac22b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.14183ms" +ts=2024-05-02T12:17:24.962219615Z caller=http.go:194 level=debug traceID=4dff90ee231ef3cc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.153979ms" +ts=2024-05-02T12:17:24.962078601Z caller=http.go:194 level=debug traceID=2fb7c9ce1623222e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.970096ms" +ts=2024-05-02T12:17:24.961572277Z caller=http.go:194 level=debug traceID=5e938ce39f4fcae5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.914374ms" +ts=2024-05-02T12:17:24.961266322Z caller=http.go:194 level=debug traceID=22fba0de660a2d92 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.911438ms" +ts=2024-05-02T12:17:24.960245382Z caller=http.go:194 level=debug traceID=3df966096bf0f05c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.082948ms" +ts=2024-05-02T12:17:24.959959933Z caller=http.go:194 level=debug traceID=7d874adffd61910f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 15.507108ms" +ts=2024-05-02T12:17:24.959811366Z caller=http.go:194 level=debug traceID=259d7d92757bb591 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.422124ms" +ts=2024-05-02T12:17:24.958293663Z caller=http.go:194 level=debug traceID=61d02347a3756e8d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.181413ms" +ts=2024-05-02T12:17:24.957892508Z caller=http.go:194 level=debug traceID=15b1b48fa8be8e92 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.168041ms" +ts=2024-05-02T12:17:24.957328894Z caller=http.go:194 level=debug traceID=5838b9ebe4543c9f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.993557ms" +ts=2024-05-02T12:17:24.956731898Z caller=http.go:194 level=debug traceID=70cdc3711392e40d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.299777ms" +ts=2024-05-02T12:17:24.956698753Z caller=http.go:194 level=debug traceID=043ffbe28a33766f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.388798ms" 
+ts=2024-05-02T12:17:24.956474365Z caller=http.go:194 level=debug traceID=0b95a99146ba8132 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.658906ms" +ts=2024-05-02T12:17:24.956320164Z caller=http.go:194 level=debug traceID=675313cb4028c164 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.879348ms" +ts=2024-05-02T12:17:24.956106991Z caller=http.go:194 level=debug traceID=7d874adffd61910f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 23.55577ms" +ts=2024-05-02T12:17:24.951788396Z caller=http.go:194 level=debug traceID=791b2a99df7a1113 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.440726ms" +ts=2024-05-02T12:17:24.950578726Z caller=http.go:194 level=debug traceID=365f7dccb8dccff2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.535167ms" +ts=2024-05-02T12:17:24.950336164Z caller=http.go:194 level=debug traceID=0125244a56e23e38 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.295075ms" +ts=2024-05-02T12:17:24.949693126Z caller=http.go:194 level=debug traceID=259d7d92757bb591 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.579064ms" +ts=2024-05-02T12:17:24.949045865Z caller=http.go:194 level=debug traceID=2fb7c9ce1623222e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.275973ms" +ts=2024-05-02T12:17:24.947747279Z caller=http.go:194 level=debug traceID=0f46e58dfe60543c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.153597ms" +ts=2024-05-02T12:17:24.946986834Z caller=http.go:194 level=debug traceID=61d02347a3756e8d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.958851ms" +ts=2024-05-02T12:17:24.946310241Z caller=http.go:194 level=debug traceID=74e4fb55864e6c25 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.88668ms" +ts=2024-05-02T12:17:24.943033619Z caller=http.go:194 level=debug traceID=43739c1030065c23 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.30212ms" +ts=2024-05-02T12:17:24.942471612Z caller=http.go:194 level=debug traceID=5051d636be32dca6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.521574ms" +ts=2024-05-02T12:17:24.94212918Z caller=http.go:194 level=debug traceID=301c17727318afe1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.083219ms" +ts=2024-05-02T12:17:24.9418339Z caller=http.go:194 level=debug traceID=39ac37ebb6105939 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.444379ms" +ts=2024-05-02T12:17:24.941580772Z caller=http.go:194 level=debug traceID=0125244a56e23e38 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.095499ms" +ts=2024-05-02T12:17:24.941148547Z caller=http.go:194 level=debug traceID=1738f486b8d423a1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.720698ms" +ts=2024-05-02T12:17:24.941083629Z caller=http.go:194 level=debug traceID=349393d37ea279ef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.903679ms" +ts=2024-05-02T12:17:24.940362848Z caller=http.go:194 level=debug traceID=37dcf2f01768ab20 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.939041ms" +ts=2024-05-02T12:17:24.939134855Z caller=http.go:194 level=debug traceID=365f7dccb8dccff2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.154793ms" +ts=2024-05-02T12:17:24.93629119Z caller=http.go:194 level=debug traceID=62e6e4ce8d16b908 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.345594ms" +ts=2024-05-02T12:17:24.933785159Z caller=http.go:194 level=debug traceID=43739c1030065c23 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.719259ms" +ts=2024-05-02T12:17:24.932770331Z caller=http.go:194 level=debug 
traceID=2de440ec43fd78df orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.737921ms" +ts=2024-05-02T12:17:24.931452307Z caller=http.go:194 level=debug traceID=37f8990ca2fc599e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.450601ms" +ts=2024-05-02T12:17:24.930160883Z caller=http.go:194 level=debug traceID=31d08b724f673539 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.085486ms" +ts=2024-05-02T12:17:24.930210288Z caller=http.go:194 level=debug traceID=39ac37ebb6105939 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.310415ms" +ts=2024-05-02T12:17:24.929637208Z caller=http.go:194 level=debug traceID=37dcf2f01768ab20 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.297639ms" +ts=2024-05-02T12:17:24.929649851Z caller=http.go:194 level=debug traceID=505bf06161f7147b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.233099ms" +ts=2024-05-02T12:17:24.928150478Z caller=http.go:194 level=debug traceID=3616fb36e92e9d6d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.363183ms" +ts=2024-05-02T12:17:24.923058157Z caller=http.go:194 level=debug traceID=212a7ae54cd42d3f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.972165ms" +ts=2024-05-02T12:17:24.921989929Z caller=http.go:194 level=debug traceID=3d5797e332fcd2ca orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.487867ms" +ts=2024-05-02T12:17:24.922098289Z caller=http.go:194 level=debug traceID=4f99b73978408443 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.003705ms" +ts=2024-05-02T12:17:24.92175595Z caller=http.go:194 level=debug traceID=7bbfbfb332a6e77a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.217822ms" +ts=2024-05-02T12:17:24.921773354Z caller=http.go:194 level=debug traceID=737a6566a143a7e8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.83345ms" +ts=2024-05-02T12:17:24.921559969Z caller=http.go:194 level=debug traceID=3edf3431a49499d9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.27856ms" +ts=2024-05-02T12:17:24.92125607Z caller=http.go:194 level=debug traceID=37f8990ca2fc599e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.221493ms" +ts=2024-05-02T12:17:24.921225498Z caller=http.go:194 level=debug traceID=110644b54edbb5c8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.052407ms" +ts=2024-05-02T12:17:24.92093316Z caller=http.go:194 level=debug traceID=50b196e750588be3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.60374ms" +ts=2024-05-02T12:17:24.920367411Z caller=http.go:194 level=debug traceID=18381a4a082aeee8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.420542ms" +ts=2024-05-02T12:17:24.920109223Z caller=http.go:194 level=debug traceID=3e5d528c6b628aee orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.479403ms" +ts=2024-05-02T12:17:24.919828124Z caller=http.go:194 level=debug traceID=01a17263d5422612 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.853346ms" +ts=2024-05-02T12:17:24.9193761Z caller=http.go:194 level=debug traceID=1f6252d64393b610 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.339168ms" +ts=2024-05-02T12:17:24.919142964Z caller=http.go:194 level=debug traceID=31d08b724f673539 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.643907ms" +ts=2024-05-02T12:17:24.918320646Z caller=http.go:194 level=debug traceID=1deda028ca536587 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.678518ms" +ts=2024-05-02T12:17:24.915230193Z caller=http.go:194 level=debug traceID=3f1bafa1f0bf6f38 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
1.888744ms" +ts=2024-05-02T12:17:24.914698834Z caller=http.go:194 level=debug traceID=4d0bf562eaf9509c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.423295ms" +ts=2024-05-02T12:17:24.913036009Z caller=http.go:194 level=debug traceID=65bb4f082d6223ce orgID=3648 msg="POST /push.v1.PusherService/Push (200) 236.448µs" +ts=2024-05-02T12:17:24.912587829Z caller=http.go:194 level=debug traceID=558df05961c9b7c4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.283468ms" +ts=2024-05-02T12:17:24.912393069Z caller=http.go:194 level=debug traceID=6f9883b0d6df814f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.593252ms" +ts=2024-05-02T12:17:24.912176581Z caller=http.go:194 level=debug traceID=50b196e750588be3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.502948ms" +ts=2024-05-02T12:17:24.910770908Z caller=http.go:194 level=debug traceID=598227a2acefff77 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.841139ms" +ts=2024-05-02T12:17:24.910210089Z caller=http.go:194 level=debug traceID=737a6566a143a7e8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.55047ms" +ts=2024-05-02T12:17:24.907931307Z caller=http.go:194 level=debug traceID=3f22fb5645c952f9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.874836ms" +ts=2024-05-02T12:17:24.907497657Z caller=http.go:194 level=debug traceID=212a7ae54cd42d3f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.829901ms" +ts=2024-05-02T12:17:24.90679734Z caller=http.go:194 level=debug traceID=1deda028ca536587 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.242106ms" +ts=2024-05-02T12:17:24.905727326Z caller=http.go:194 level=debug traceID=4aa26b3731a15d3b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.350403ms" +ts=2024-05-02T12:17:24.905143563Z caller=http.go:194 level=debug traceID=69e5c1408cd84740 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.096094ms" +ts=2024-05-02T12:17:24.904756241Z caller=http.go:194 level=debug traceID=3f1bafa1f0bf6f38 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.343267ms" +ts=2024-05-02T12:17:24.904229431Z caller=http.go:194 level=debug traceID=4d0bf562eaf9509c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.620024ms" +ts=2024-05-02T12:17:24.903421915Z caller=http.go:194 level=debug traceID=14a4e19abc92efd1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 216.204µs" +ts=2024-05-02T12:17:24.903114296Z caller=http.go:194 level=debug traceID=116eec9fadf56c1d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.01633ms" +ts=2024-05-02T12:17:24.902806227Z caller=http.go:194 level=debug traceID=2c97334f550b97a2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.096504ms" +ts=2024-05-02T12:17:24.90243762Z caller=http.go:194 level=debug traceID=7ad2e8095f9bf6d1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.212955ms" +ts=2024-05-02T12:17:24.9022658Z caller=http.go:194 level=debug traceID=4dffce7e68621c33 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.428407ms" +ts=2024-05-02T12:17:24.902135791Z caller=http.go:194 level=debug traceID=69d7a55a1ad86dd6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.446621ms" +ts=2024-05-02T12:17:24.90197564Z caller=http.go:194 level=debug traceID=42412dcb16a4ebfd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.678696ms" +ts=2024-05-02T12:17:24.901216022Z caller=http.go:194 level=debug traceID=558df05961c9b7c4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.221697ms" +ts=2024-05-02T12:17:24.899477794Z caller=http.go:194 level=debug 
traceID=5825efdc6d7702a4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.957644ms" +ts=2024-05-02T12:17:24.898302797Z caller=http.go:194 level=debug traceID=598227a2acefff77 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.303793ms" +ts=2024-05-02T12:17:24.897909291Z caller=http.go:194 level=debug traceID=20de709433d9b42f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.345887ms" +ts=2024-05-02T12:17:24.897811076Z caller=http.go:194 level=debug traceID=3f22fb5645c952f9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.918656ms" +ts=2024-05-02T12:17:24.896731662Z caller=http.go:194 level=debug traceID=5d1bb8547c514ace orgID=3648 msg="POST /push.v1.PusherService/Push (200) 433.656µs" +ts=2024-05-02T12:17:24.895866411Z caller=http.go:194 level=debug traceID=552eccda766be9a8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.781611ms" +ts=2024-05-02T12:17:24.895751695Z caller=http.go:194 level=debug traceID=2d31c777c0c822dc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.515964ms" +ts=2024-05-02T12:17:24.894917992Z caller=http.go:194 level=debug traceID=4aa26b3731a15d3b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.845159ms" +ts=2024-05-02T12:17:24.894022098Z caller=http.go:194 level=debug traceID=69e5c1408cd84740 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.213764ms" +ts=2024-05-02T12:17:24.893410983Z caller=http.go:194 level=debug traceID=01ee3cd3a9d786e4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.454719ms" +ts=2024-05-02T12:17:24.893062044Z caller=http.go:194 level=debug traceID=746c45145de6d133 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.676135ms" +ts=2024-05-02T12:17:24.891016795Z caller=http.go:194 level=debug traceID=69d7a55a1ad86dd6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.424509ms" +ts=2024-05-02T12:17:24.890438289Z caller=http.go:194 level=debug traceID=42412dcb16a4ebfd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.275263ms" +ts=2024-05-02T12:17:24.89022281Z caller=http.go:194 level=debug traceID=2c97334f550b97a2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.748332ms" +ts=2024-05-02T12:17:24.889745967Z caller=http.go:194 level=debug traceID=513b1593cee025e7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.249834ms" +ts=2024-05-02T12:17:24.888646328Z caller=http.go:194 level=debug traceID=0750212b13018958 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.684094ms" +ts=2024-05-02T12:17:24.888457513Z caller=http.go:194 level=debug traceID=6ecd531e7399da13 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.410522ms" +ts=2024-05-02T12:17:24.887589361Z caller=http.go:194 level=debug traceID=5825efdc6d7702a4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.736562ms" +ts=2024-05-02T12:17:24.886611846Z caller=http.go:194 level=debug traceID=049ae95ebb57d2c4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.475021ms" +ts=2024-05-02T12:17:24.886112939Z caller=http.go:194 level=debug traceID=552eccda766be9a8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.054434ms" +ts=2024-05-02T12:17:24.885593513Z caller=http.go:194 level=debug traceID=24e0f9b37903e25b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.230675ms" +ts=2024-05-02T12:17:24.883757847Z caller=http.go:194 level=debug traceID=4353fa936c8f45e0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.493706ms" +ts=2024-05-02T12:17:24.883695393Z caller=http.go:194 level=debug traceID=486bd4deeba6fa02 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
2.007689ms" +ts=2024-05-02T12:17:24.882485799Z caller=http.go:194 level=debug traceID=024ac2af710405b6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.651409ms" +ts=2024-05-02T12:17:24.882377344Z caller=http.go:194 level=debug traceID=4b3be5591d0c0210 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.096072ms" +ts=2024-05-02T12:17:24.882242392Z caller=http.go:194 level=debug traceID=06ca72d760da2d23 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.974607ms" +ts=2024-05-02T12:17:24.882129814Z caller=http.go:194 level=debug traceID=01ee3cd3a9d786e4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.528844ms" +ts=2024-05-02T12:17:24.881356506Z caller=http.go:194 level=debug traceID=3ab8f1f169f59750 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.912739ms" +ts=2024-05-02T12:17:24.881111757Z caller=http.go:194 level=debug traceID=58001ee199314add orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.772285ms" +ts=2024-05-02T12:17:24.880538038Z caller=http.go:194 level=debug traceID=52d6710439985182 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.801625ms" +ts=2024-05-02T12:17:24.880523657Z caller=http.go:194 level=debug traceID=1a56c32ecd9e62b0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.402026ms" +ts=2024-05-02T12:17:24.879864814Z caller=http.go:194 level=debug traceID=6b489e1877163608 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.92397ms" +ts=2024-05-02T12:17:24.878969225Z caller=http.go:194 level=debug traceID=380426b1fb623ffe orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.970745ms" +ts=2024-05-02T12:17:24.877745632Z caller=http.go:194 level=debug traceID=2e0fd38b613a1cb9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.397739ms" +ts=2024-05-02T12:17:24.876917695Z caller=http.go:194 level=debug traceID=29cbf6c39428573a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 694.234µs" +ts=2024-05-02T12:17:24.875623188Z caller=http.go:194 level=debug traceID=3b28e8a694e6c445 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.38212ms" +ts=2024-05-02T12:17:24.874883391Z caller=http.go:194 level=debug traceID=1abedce66aef06a7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.605523ms" +ts=2024-05-02T12:17:24.874264057Z caller=http.go:194 level=debug traceID=098928a91ae6749b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.719316ms" +ts=2024-05-02T12:17:24.873902802Z caller=http.go:194 level=debug traceID=24e0f9b37903e25b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.453567ms" +ts=2024-05-02T12:17:24.873874701Z caller=http.go:194 level=debug traceID=06ca72d760da2d23 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.772687ms" +ts=2024-05-02T12:17:24.873411274Z caller=http.go:194 level=debug traceID=0e8c2d8a3068e924 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.128376ms" +ts=2024-05-02T12:17:24.872748058Z caller=http.go:194 level=debug traceID=435e7c14c8d3e43d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.924635ms" +ts=2024-05-02T12:17:24.872606497Z caller=http.go:194 level=debug traceID=7b95d54bc93efdbf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.855124ms" +ts=2024-05-02T12:17:24.871010089Z caller=http.go:194 level=debug traceID=024ac2af710405b6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.151048ms" +ts=2024-05-02T12:17:24.870231301Z caller=http.go:194 level=debug traceID=7663a7f0a8e7219e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 424.801µs" +ts=2024-05-02T12:17:24.869699039Z caller=http.go:194 level=debug 
traceID=52d6710439985182 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.227827ms" +ts=2024-05-02T12:17:24.869694317Z caller=http.go:194 level=debug traceID=58001ee199314add orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.341864ms" +ts=2024-05-02T12:17:24.869471312Z caller=http.go:194 level=debug traceID=7c314ef42aba5423 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.299744ms" +ts=2024-05-02T12:17:24.86846235Z caller=http.go:194 level=debug traceID=380426b1fb623ffe orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.810241ms" +ts=2024-05-02T12:17:24.867678233Z caller=http.go:194 level=debug traceID=6b489e1877163608 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.559215ms" +ts=2024-05-02T12:17:24.867486612Z caller=http.go:194 level=debug traceID=4db0df0e4f17e938 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.857754ms" +ts=2024-05-02T12:17:24.867348576Z caller=http.go:194 level=debug traceID=0f997c993f24191d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.528249ms" +ts=2024-05-02T12:17:24.866107809Z caller=http.go:194 level=debug traceID=2e0fd38b613a1cb9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.875221ms" +ts=2024-05-02T12:17:24.8651452Z caller=http.go:194 level=debug traceID=3b28e8a694e6c445 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.885815ms" +ts=2024-05-02T12:17:24.86423871Z caller=http.go:194 level=debug traceID=3f4b535f64896512 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.218144ms" +ts=2024-05-02T12:17:24.862843371Z caller=http.go:194 level=debug traceID=286a3084f93cf618 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.884803ms" +ts=2024-05-02T12:17:24.861924881Z caller=http.go:194 level=debug traceID=0a796d2bce1a23d9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.268709ms" +ts=2024-05-02T12:17:24.860889308Z caller=http.go:194 level=debug traceID=435e7c14c8d3e43d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.955984ms" +ts=2024-05-02T12:17:24.860717675Z caller=http.go:194 level=debug traceID=68ae7d4df086aaae orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.35367ms" +ts=2024-05-02T12:17:24.859371129Z caller=http.go:194 level=debug traceID=39593dbbfc3f3976 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.488194ms" +ts=2024-05-02T12:17:24.857500416Z caller=http.go:194 level=debug traceID=4721d75decdc4200 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.967709ms" +ts=2024-05-02T12:17:24.85630458Z caller=http.go:194 level=debug traceID=3c04dd241faf2b0b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.198748ms" +ts=2024-05-02T12:17:24.856298591Z caller=http.go:194 level=debug traceID=0f997c993f24191d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.157476ms" +ts=2024-05-02T12:17:24.856159188Z caller=http.go:194 level=debug traceID=4ee4a77e8f76c4c2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.937014ms" +ts=2024-05-02T12:17:24.85571503Z caller=http.go:194 level=debug traceID=41e59fc429671f95 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.908397ms" +ts=2024-05-02T12:17:24.855483457Z caller=http.go:194 level=debug traceID=7d99fa6b0298ad60 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.274463ms" +ts=2024-05-02T12:17:24.854241979Z caller=http.go:194 level=debug traceID=75ccc913c8997476 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.799702ms" +ts=2024-05-02T12:17:24.853536442Z caller=http.go:194 level=debug traceID=784b2fe3fcb4e00a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
9.416629ms" +ts=2024-05-02T12:17:24.853500546Z caller=http.go:194 level=debug traceID=0939b57019706057 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.016838ms" +ts=2024-05-02T12:17:24.852769774Z caller=http.go:194 level=debug traceID=4711768b11d86140 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.578008ms" +ts=2024-05-02T12:17:24.852695001Z caller=http.go:194 level=debug traceID=3f4b535f64896512 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.042209ms" +ts=2024-05-02T12:17:24.850226618Z caller=http.go:194 level=debug traceID=0a796d2bce1a23d9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.845327ms" +ts=2024-05-02T12:17:24.85000238Z caller=http.go:194 level=debug traceID=62878f719c643130 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.003566ms" +ts=2024-05-02T12:17:24.849686769Z caller=http.go:194 level=debug traceID=68ae7d4df086aaae orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.21872ms" +ts=2024-05-02T12:17:24.847871044Z caller=http.go:194 level=debug traceID=38bc735e2031ed8c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.968926ms" +ts=2024-05-02T12:17:24.846489283Z caller=http.go:194 level=debug traceID=4721d75decdc4200 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.092488ms" +ts=2024-05-02T12:17:24.846176237Z caller=http.go:194 level=debug traceID=0939b57019706057 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.443908ms" +ts=2024-05-02T12:17:24.846057784Z caller=http.go:194 level=debug traceID=74ae06f88099cc0b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.201119ms" +ts=2024-05-02T12:17:24.845580879Z caller=http.go:194 level=debug traceID=4967eb17040e3a55 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.888067ms" +ts=2024-05-02T12:17:24.844710486Z caller=http.go:194 level=debug traceID=3c04dd241faf2b0b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.309237ms" +ts=2024-05-02T12:17:24.844324221Z caller=http.go:194 level=debug traceID=784b2fe3fcb4e00a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 29.049135ms" +ts=2024-05-02T12:17:24.844076764Z caller=http.go:194 level=debug traceID=4711768b11d86140 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.611387ms" +ts=2024-05-02T12:17:24.843531293Z caller=http.go:194 level=debug traceID=6bf985aa8ef5c2e8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.490554ms" +ts=2024-05-02T12:17:24.843299253Z caller=http.go:194 level=debug traceID=75ccc913c8997476 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.923874ms" +ts=2024-05-02T12:17:24.84229462Z caller=http.go:194 level=debug traceID=3fc989c1d1506adb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.809415ms" +ts=2024-05-02T12:17:24.842175873Z caller=http.go:194 level=debug traceID=6c49fc997bc5af6b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 49.925953ms" +ts=2024-05-02T12:17:24.841326521Z caller=http.go:194 level=debug traceID=3ec5b98ec0e0c81b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.442262ms" +ts=2024-05-02T12:17:24.836713881Z caller=http.go:194 level=debug traceID=23ffa5da22cedd77 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.810874ms" +ts=2024-05-02T12:17:24.836163531Z caller=http.go:194 level=debug traceID=2f6a1702c8794b9c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.290541ms" +ts=2024-05-02T12:17:24.834322818Z caller=http.go:194 level=debug traceID=125e39f550d842d5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.366751ms" +ts=2024-05-02T12:17:24.834276427Z caller=http.go:194 
level=debug traceID=2093b30edd9dcdb1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.204274ms" +ts=2024-05-02T12:17:24.833914095Z caller=http.go:194 level=debug traceID=0332987d4d9745af orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.036156ms" +ts=2024-05-02T12:17:24.832392097Z caller=http.go:194 level=debug traceID=6bf985aa8ef5c2e8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.02871ms" +ts=2024-05-02T12:17:24.832343245Z caller=http.go:194 level=debug traceID=28839edc554097a2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.90198ms" +ts=2024-05-02T12:17:24.831011629Z caller=http.go:194 level=debug traceID=5a3eaef0b85437b7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.36448ms" +ts=2024-05-02T12:17:24.830216625Z caller=http.go:194 level=debug traceID=3fc989c1d1506adb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.379642ms" +ts=2024-05-02T12:17:24.829903409Z caller=http.go:194 level=debug traceID=128cfe668259cbaa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.205994ms" +ts=2024-05-02T12:17:24.829383546Z caller=http.go:194 level=debug traceID=77ac30ea6f0a8ceb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.566319ms" +ts=2024-05-02T12:17:24.829208465Z caller=http.go:194 level=debug traceID=755211ff8515a857 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.045342ms" +ts=2024-05-02T12:17:24.828360351Z caller=http.go:194 level=debug traceID=2fc1c90492f1f2fa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.489397ms" +ts=2024-05-02T12:17:24.827683959Z caller=http.go:194 level=debug traceID=202811e47c4795f0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.97327ms" +ts=2024-05-02T12:17:24.826561108Z caller=http.go:194 level=debug traceID=29395baba48847a6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.853773ms" +ts=2024-05-02T12:17:24.826330413Z caller=http.go:194 level=debug traceID=41c5ed6932e03fb4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.323585ms" +ts=2024-05-02T12:17:24.824975406Z caller=http.go:194 level=debug traceID=7c6c4ed7e9c4fbc5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.783601ms" +ts=2024-05-02T12:17:24.824393006Z caller=http.go:194 level=debug traceID=2093b30edd9dcdb1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.24048ms" +ts=2024-05-02T12:17:24.823779946Z caller=http.go:194 level=debug traceID=3bc3414864f7e2fa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 45.052444ms" +ts=2024-05-02T12:17:24.82302995Z caller=http.go:194 level=debug traceID=69c595c1e028ebbe orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.114789ms" +ts=2024-05-02T12:17:24.822948991Z caller=http.go:194 level=debug traceID=125e39f550d842d5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.255722ms" +ts=2024-05-02T12:17:24.822285483Z caller=http.go:194 level=debug traceID=0332987d4d9745af orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.62935ms" +ts=2024-05-02T12:17:24.821348755Z caller=http.go:194 level=debug traceID=28839edc554097a2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.92766ms" +ts=2024-05-02T12:17:24.819951959Z caller=http.go:194 level=debug traceID=202811e47c4795f0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.859135ms" +ts=2024-05-02T12:17:24.819326209Z caller=http.go:194 level=debug traceID=77ac30ea6f0a8ceb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.670286ms" +ts=2024-05-02T12:17:24.819310145Z caller=http.go:194 level=debug traceID=732beffe634aaad5 orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 2.62752ms" +ts=2024-05-02T12:17:24.818417458Z caller=http.go:194 level=debug traceID=2fc1c90492f1f2fa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.576807ms" +ts=2024-05-02T12:17:24.817012275Z caller=http.go:194 level=debug traceID=4acf3703721f38e0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.821994ms" +ts=2024-05-02T12:17:24.814540716Z caller=http.go:194 level=debug traceID=2cd75358d1b4a795 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.498892ms" +ts=2024-05-02T12:17:24.814347536Z caller=http.go:194 level=debug traceID=128cfe668259cbaa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 25.803949ms" +ts=2024-05-02T12:17:24.813728386Z caller=http.go:194 level=debug traceID=7c6c4ed7e9c4fbc5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.769157ms" +ts=2024-05-02T12:17:24.812297013Z caller=http.go:194 level=debug traceID=69c595c1e028ebbe orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.721171ms" +ts=2024-05-02T12:17:24.809210652Z caller=http.go:194 level=debug traceID=26523ee1f90cd3b6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.31169ms" +ts=2024-05-02T12:17:24.80502285Z caller=http.go:194 level=debug traceID=18354745b53ae094 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.496916ms" +ts=2024-05-02T12:17:24.804809889Z caller=http.go:194 level=debug traceID=4acf3703721f38e0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.609102ms" +ts=2024-05-02T12:17:24.803161064Z caller=http.go:194 level=debug traceID=2cd75358d1b4a795 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.309593ms" +ts=2024-05-02T12:17:24.802918244Z caller=http.go:194 level=debug traceID=13063ba8c4f60926 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.038992ms" +ts=2024-05-02T12:17:24.802411476Z caller=http.go:194 level=debug traceID=5ee31ebedaf3a0d1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.914254ms" +ts=2024-05-02T12:17:24.798108894Z caller=http.go:194 level=debug traceID=26523ee1f90cd3b6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.597704ms" +ts=2024-05-02T12:17:24.797795936Z caller=http.go:194 level=debug traceID=6f9e2ec8bf1c4e6d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.965666ms" +ts=2024-05-02T12:17:24.794051083Z caller=http.go:194 level=debug traceID=7a2a1519748cb977 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.073775ms" +ts=2024-05-02T12:17:24.793150349Z caller=http.go:194 level=debug traceID=130200c0d7450fa2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.240099ms" +ts=2024-05-02T12:17:24.791761869Z caller=http.go:194 level=debug traceID=5396a40ced8c53af orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.576484ms" +ts=2024-05-02T12:17:24.790761298Z caller=http.go:194 level=debug traceID=1a03648ac2913679 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.008777ms" +ts=2024-05-02T12:17:24.788736033Z caller=http.go:194 level=debug traceID=5ee31ebedaf3a0d1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.549723ms" +ts=2024-05-02T12:17:24.785871942Z caller=http.go:194 level=debug traceID=3bc3414864f7e2fa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 20.186825ms" +ts=2024-05-02T12:17:24.785882831Z caller=http.go:194 level=debug traceID=6f9e2ec8bf1c4e6d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.516318ms" +ts=2024-05-02T12:17:24.785377771Z caller=http.go:194 level=debug traceID=2c4910b7a9c6b0e4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.455271ms" 
+ts=2024-05-02T12:17:24.785267089Z caller=http.go:194 level=debug traceID=51cd7f74a4b03219 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.275766ms" +ts=2024-05-02T12:17:24.784085396Z caller=http.go:194 level=debug traceID=47d6cd351accea3d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.391246ms" +ts=2024-05-02T12:17:24.782083048Z caller=http.go:194 level=debug traceID=7a2a1519748cb977 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.938461ms" +ts=2024-05-02T12:17:24.780636798Z caller=http.go:194 level=debug traceID=5396a40ced8c53af orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.690022ms" +ts=2024-05-02T12:17:24.779002829Z caller=http.go:194 level=debug traceID=05f1bda66f8efbd3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.324322ms" +ts=2024-05-02T12:17:24.777572433Z caller=http.go:194 level=debug traceID=44892c45386c5867 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.762415ms" +ts=2024-05-02T12:17:24.776861565Z caller=http.go:194 level=debug traceID=67d0fab9bc999daa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.377025ms" +ts=2024-05-02T12:17:24.775318874Z caller=http.go:194 level=debug traceID=296d964d3cb90adb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.989717ms" +ts=2024-05-02T12:17:24.775133344Z caller=http.go:194 level=debug traceID=2c4910b7a9c6b0e4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.948417ms" +ts=2024-05-02T12:17:24.773846895Z caller=http.go:194 level=debug traceID=47d6cd351accea3d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.496595ms" +ts=2024-05-02T12:17:24.773633006Z caller=http.go:194 level=debug traceID=51cd7f74a4b03219 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.09227ms" +ts=2024-05-02T12:17:24.76957574Z caller=http.go:194 level=debug traceID=4c90af809d99e636 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.53076ms" +ts=2024-05-02T12:17:24.766789202Z caller=http.go:194 level=debug traceID=2d8d164e10775552 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.397622ms" +ts=2024-05-02T12:17:24.766563159Z caller=http.go:194 level=debug traceID=5d54b03a805a43c2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.380049ms" +ts=2024-05-02T12:17:24.764030253Z caller=http.go:194 level=debug traceID=64b29395f5184897 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.375977ms" +ts=2024-05-02T12:17:24.763759811Z caller=http.go:194 level=debug traceID=296d964d3cb90adb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.078914ms" +ts=2024-05-02T12:17:24.761176324Z caller=http.go:194 level=debug traceID=6c5509b2b5683ae5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.424781ms" +ts=2024-05-02T12:17:24.76043775Z caller=http.go:194 level=debug traceID=4c90af809d99e636 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.28208ms" +ts=2024-05-02T12:17:24.756010939Z caller=http.go:194 level=debug traceID=2d8d164e10775552 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.298319ms" +ts=2024-05-02T12:17:24.755256415Z caller=http.go:194 level=debug traceID=5d54b03a805a43c2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.282577ms" +ts=2024-05-02T12:17:24.754582866Z caller=http.go:194 level=debug traceID=141b9c0fa9e3db26 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 25.785826ms" +ts=2024-05-02T12:17:24.753372395Z caller=http.go:194 level=debug traceID=4e2aba2468069a60 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 411.751µs" +ts=2024-05-02T12:17:24.751801313Z caller=http.go:194 level=debug 
traceID=0471cdd33effd317 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.047359ms" +ts=2024-05-02T12:17:24.751720482Z caller=http.go:194 level=debug traceID=64b29395f5184897 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.33544ms" +ts=2024-05-02T12:17:24.750574889Z caller=http.go:194 level=debug traceID=6c5509b2b5683ae5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.929868ms" +ts=2024-05-02T12:17:24.749849226Z caller=http.go:194 level=debug traceID=72efe5dd076add17 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.705953ms" +ts=2024-05-02T12:17:24.746231117Z caller=http.go:194 level=debug traceID=6e1f81e3f60b0b10 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.744196ms" +ts=2024-05-02T12:17:24.746118884Z caller=http.go:194 level=debug traceID=0786cc9693aa8115 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.036557ms" +ts=2024-05-02T12:17:24.742632449Z caller=http.go:194 level=debug traceID=441c1f331cb047a1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.882067ms" +ts=2024-05-02T12:17:24.741221427Z caller=http.go:194 level=debug traceID=70c7446e8265445a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.316127ms" +ts=2024-05-02T12:17:24.740786746Z caller=http.go:194 level=debug traceID=4d4c4a164ea0bd1a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.850924ms" +ts=2024-05-02T12:17:24.74062908Z caller=http.go:194 level=debug traceID=1051c33c112a2ae5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.597282ms" +ts=2024-05-02T12:17:24.739613813Z caller=http.go:194 level=debug traceID=0af1d66e3c1e9bce orgID=3648 msg="POST /push.v1.PusherService/Push (200) 17.426969ms" +ts=2024-05-02T12:17:24.738365786Z caller=http.go:194 level=debug traceID=72efe5dd076add17 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.076793ms" +ts=2024-05-02T12:17:24.736126664Z caller=http.go:194 level=debug traceID=0786cc9693aa8115 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.121345ms" +ts=2024-05-02T12:17:24.735847173Z caller=http.go:194 level=debug traceID=6c636f280c2b68aa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.858019ms" +ts=2024-05-02T12:17:24.735162391Z caller=http.go:194 level=debug traceID=33a1c0ce8b43b23f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.399359ms" +ts=2024-05-02T12:17:24.73437492Z caller=http.go:194 level=debug traceID=5869af595c15172c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.569003ms" +ts=2024-05-02T12:17:24.734112959Z caller=http.go:194 level=debug traceID=441c1f331cb047a1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.644135ms" +ts=2024-05-02T12:17:24.731597744Z caller=http.go:194 level=debug traceID=148a15b6a51583e7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 385.9µs" +ts=2024-05-02T12:17:24.729143516Z caller=http.go:194 level=debug traceID=70c7446e8265445a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.055767ms" +ts=2024-05-02T12:17:24.72907847Z caller=http.go:194 level=debug traceID=1051c33c112a2ae5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.421828ms" +ts=2024-05-02T12:17:24.725788079Z caller=http.go:194 level=debug traceID=0af1d66e3c1e9bce orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.982872ms" +ts=2024-05-02T12:17:24.72562067Z caller=http.go:194 level=debug traceID=6c636f280c2b68aa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.684823ms" +ts=2024-05-02T12:17:24.725448401Z caller=http.go:194 level=debug traceID=33a1c0ce8b43b23f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
3.820359ms" +ts=2024-05-02T12:17:24.725008978Z caller=http.go:194 level=debug traceID=2827f6065ebb2b52 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 30.878309ms" +ts=2024-05-02T12:17:24.723582692Z caller=http.go:194 level=debug traceID=5869af595c15172c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.05488ms" +ts=2024-05-02T12:17:24.720002136Z caller=http.go:194 level=debug traceID=148a15b6a51583e7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 470.421µs" +ts=2024-05-02T12:17:24.714852665Z caller=http.go:194 level=debug traceID=7b1f154372c9cfaf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.84014ms" +ts=2024-05-02T12:17:24.711444679Z caller=http.go:194 level=debug traceID=72b97a8a366030de orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.984671ms" +ts=2024-05-02T12:17:24.70818697Z caller=http.go:194 level=debug traceID=4c96dc1c1f117e17 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.973675ms" +ts=2024-05-02T12:17:24.70605747Z caller=http.go:194 level=debug traceID=488d1bd9af2f38ab orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.585602ms" +ts=2024-05-02T12:17:24.704419695Z caller=http.go:194 level=debug traceID=10c5df3bbb95aef4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 407.911µs" +ts=2024-05-02T12:17:24.702228834Z caller=http.go:194 level=debug traceID=7b1f154372c9cfaf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.762528ms" +ts=2024-05-02T12:17:24.700510165Z caller=http.go:194 level=debug traceID=49bd83f8e025e190 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.399573ms" +ts=2024-05-02T12:17:24.700078982Z caller=http.go:194 level=debug traceID=6636125d2ee0175d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 257.426µs" +ts=2024-05-02T12:17:24.699573452Z caller=http.go:194 level=debug traceID=72b97a8a366030de orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.468068ms" +ts=2024-05-02T12:17:24.699374731Z caller=http.go:194 level=debug traceID=05e0b8ab7bef3012 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 295.361µs" +ts=2024-05-02T12:17:24.698048709Z caller=http.go:194 level=debug traceID=488d1bd9af2f38ab orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.325928ms" +ts=2024-05-02T12:17:24.693069809Z caller=http.go:194 level=debug traceID=10c5df3bbb95aef4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 366.499µs" +ts=2024-05-02T12:17:24.692105448Z caller=http.go:194 level=debug traceID=6d191605c173ec15 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.129127ms" +ts=2024-05-02T12:17:24.689735299Z caller=http.go:194 level=debug traceID=0126c624cb09fbdf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 300.913µs" +ts=2024-05-02T12:17:24.689423604Z caller=http.go:194 level=debug traceID=05e0b8ab7bef3012 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 470.356µs" +ts=2024-05-02T12:17:24.68897213Z caller=http.go:194 level=debug traceID=15571bfb946eb9ec orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.448266ms" +ts=2024-05-02T12:17:24.688353803Z caller=http.go:194 level=debug traceID=6636125d2ee0175d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 278.439µs" +ts=2024-05-02T12:17:24.686766147Z caller=http.go:194 level=debug traceID=4ee1f352ea3330dc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.871726ms" +ts=2024-05-02T12:17:24.685720517Z caller=http.go:194 level=debug traceID=5af3e5859683c1e4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.316633ms" +ts=2024-05-02T12:17:24.682065737Z caller=http.go:194 level=debug 
traceID=6d191605c173ec15 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.17179ms" +ts=2024-05-02T12:17:24.679128195Z caller=http.go:194 level=debug traceID=498ccb6d374f8f2a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.531803ms" +ts=2024-05-02T12:17:24.678752942Z caller=http.go:194 level=debug traceID=15571bfb946eb9ec orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.429534ms" +ts=2024-05-02T12:17:24.678206852Z caller=http.go:194 level=debug traceID=6925d3ef90ab2bb8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.190132ms" +ts=2024-05-02T12:17:24.676331743Z caller=http.go:194 level=debug traceID=4ee1f352ea3330dc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.041268ms" +ts=2024-05-02T12:17:24.676054426Z caller=http.go:194 level=debug traceID=5af3e5859683c1e4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.654887ms" +ts=2024-05-02T12:17:24.670888229Z caller=http.go:194 level=debug traceID=4049c03de2c5f154 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.636991ms" +ts=2024-05-02T12:17:24.668785002Z caller=http.go:194 level=debug traceID=3f35948d69f73b53 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.146736ms" +ts=2024-05-02T12:17:24.667144181Z caller=http.go:194 level=debug traceID=6925d3ef90ab2bb8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.439147ms" +ts=2024-05-02T12:17:24.66710787Z caller=http.go:194 level=debug traceID=498ccb6d374f8f2a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.964448ms" +ts=2024-05-02T12:17:24.666135239Z caller=http.go:194 level=debug traceID=305006785b7e7232 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.551761ms" +ts=2024-05-02T12:17:24.664922852Z caller=http.go:194 level=debug traceID=003a04e43e0302b8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.384058ms" +ts=2024-05-02T12:17:24.664172538Z caller=http.go:194 level=debug traceID=372e550dedeb7a93 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 19.084395ms" +ts=2024-05-02T12:17:24.660756734Z caller=http.go:194 level=debug traceID=4049c03de2c5f154 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.735116ms" +ts=2024-05-02T12:17:24.660753854Z caller=http.go:194 level=debug traceID=5b750e954548cf04 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.329713ms" +ts=2024-05-02T12:17:24.660410727Z caller=http.go:194 level=debug traceID=4f207240690cb64a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.019437ms" +ts=2024-05-02T12:17:24.659975798Z caller=http.go:194 level=debug traceID=656d1e5aaf641fc0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.062588ms" +ts=2024-05-02T12:17:24.658856076Z caller=http.go:194 level=debug traceID=3f35948d69f73b53 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.296319ms" +ts=2024-05-02T12:17:24.655397754Z caller=http.go:194 level=debug traceID=305006785b7e7232 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.342627ms" +ts=2024-05-02T12:17:24.653137807Z caller=http.go:194 level=debug traceID=01e8d71d468958aa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.058317ms" +ts=2024-05-02T12:17:24.650446559Z caller=http.go:194 level=debug traceID=4f207240690cb64a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.709229ms" +ts=2024-05-02T12:17:24.650370252Z caller=http.go:194 level=debug traceID=62702cb2820e31fa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.932934ms" +ts=2024-05-02T12:17:24.650294104Z caller=http.go:194 level=debug traceID=5b750e954548cf04 orgID=1218 msg="POST /push.v1.PusherService/Push 
(200) 2.100273ms" +ts=2024-05-02T12:17:24.649156541Z caller=http.go:194 level=debug traceID=656d1e5aaf641fc0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.536297ms" +ts=2024-05-02T12:17:24.648662759Z caller=http.go:194 level=debug traceID=21e48362c0f1b333 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 385.371µs" +ts=2024-05-02T12:17:24.64505271Z caller=http.go:194 level=debug traceID=76d588014d05c616 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.564275ms" +ts=2024-05-02T12:17:24.6434029Z caller=http.go:194 level=debug traceID=37d168f9c84e574e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 22.221307ms" +ts=2024-05-02T12:17:24.642640684Z caller=http.go:194 level=debug traceID=37d168f9c84e574e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.131691ms" +ts=2024-05-02T12:17:24.641233238Z caller=http.go:194 level=debug traceID=01e8d71d468958aa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.815507ms" +ts=2024-05-02T12:17:24.639389454Z caller=http.go:194 level=debug traceID=636d626aa447a09c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.818337ms" +ts=2024-05-02T12:17:24.637578615Z caller=http.go:194 level=debug traceID=62702cb2820e31fa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.139892ms" +ts=2024-05-02T12:17:24.63748803Z caller=http.go:194 level=debug traceID=5c1c6162f3a47d26 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.413658ms" +ts=2024-05-02T12:17:24.637198399Z caller=http.go:194 level=debug traceID=4181045c8d50f51c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 255.491µs" +ts=2024-05-02T12:17:24.636995974Z caller=http.go:194 level=debug traceID=21e48362c0f1b333 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 353.455µs" +ts=2024-05-02T12:17:24.636863172Z caller=http.go:194 level=debug traceID=6be4a02d2bd05afb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.833792ms" +ts=2024-05-02T12:17:24.63394388Z caller=http.go:194 level=debug traceID=1f7c297abf81773b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 308.641µs" +ts=2024-05-02T12:17:24.633521178Z caller=http.go:194 level=debug traceID=76d588014d05c616 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.931252ms" +ts=2024-05-02T12:17:24.629034334Z caller=http.go:194 level=debug traceID=2eeb2b91260df1b0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 20.114501ms" +ts=2024-05-02T12:17:24.62868724Z caller=http.go:194 level=debug traceID=2d38bd1ffc9342a3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.11961ms" +ts=2024-05-02T12:17:24.628467631Z caller=http.go:194 level=debug traceID=636d626aa447a09c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.972978ms" +ts=2024-05-02T12:17:24.627160121Z caller=http.go:194 level=debug traceID=6be4a02d2bd05afb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.511227ms" +ts=2024-05-02T12:17:24.626432701Z caller=http.go:194 level=debug traceID=30cad14dae5d8be4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 261.48µs" +ts=2024-05-02T12:17:24.624817293Z caller=http.go:194 level=debug traceID=5c1c6162f3a47d26 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.749268ms" +ts=2024-05-02T12:17:24.62388553Z caller=http.go:194 level=debug traceID=1f7c297abf81773b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 345.017µs" +ts=2024-05-02T12:17:24.621424654Z caller=http.go:194 level=debug traceID=2d38bd1ffc9342a3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.145367ms" +ts=2024-05-02T12:17:24.62007775Z caller=http.go:194 level=debug 
traceID=709a4cba120d5ebb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.787949ms" +ts=2024-05-02T12:17:24.619188131Z caller=http.go:194 level=debug traceID=2eeb2b91260df1b0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 22.427072ms" +ts=2024-05-02T12:17:24.616413177Z caller=http.go:194 level=debug traceID=30cad14dae5d8be4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 524.76µs" +ts=2024-05-02T12:17:24.616019135Z caller=http.go:194 level=debug traceID=4c93a65ca56d9131 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.746183ms" +ts=2024-05-02T12:17:24.615432936Z caller=http.go:194 level=debug traceID=5ee4872dd6e3b641 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.296748ms" +ts=2024-05-02T12:17:24.614819417Z caller=http.go:194 level=debug traceID=5df36a532898114b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.820942ms" +ts=2024-05-02T12:17:24.614447013Z caller=http.go:194 level=debug traceID=1472abba2ada9e79 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.297328ms" +ts=2024-05-02T12:17:24.6119734Z caller=http.go:194 level=debug traceID=5aa9effa4d39cccd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.084468ms" +ts=2024-05-02T12:17:24.612046849Z caller=http.go:194 level=debug traceID=494df078e879033e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.738011ms" +ts=2024-05-02T12:17:24.60954508Z caller=http.go:194 level=debug traceID=647314bc2c9977b3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.516933ms" +ts=2024-05-02T12:17:24.608820532Z caller=http.go:194 level=debug traceID=709a4cba120d5ebb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.729472ms" +ts=2024-05-02T12:17:24.608274279Z caller=http.go:194 level=debug traceID=0a89ce42a3cd232f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.775992ms" +ts=2024-05-02T12:17:24.606451248Z caller=http.go:194 level=debug traceID=21133977e13ede4d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.690831ms" +ts=2024-05-02T12:17:24.604876022Z caller=http.go:194 level=debug traceID=5df36a532898114b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.000773ms" +ts=2024-05-02T12:17:24.604785028Z caller=http.go:194 level=debug traceID=1472abba2ada9e79 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.639454ms" +ts=2024-05-02T12:17:24.604400249Z caller=http.go:194 level=debug traceID=5ee4872dd6e3b641 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.788822ms" +ts=2024-05-02T12:17:24.604392238Z caller=http.go:194 level=debug traceID=4c93a65ca56d9131 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.665247ms" +ts=2024-05-02T12:17:24.603271619Z caller=http.go:194 level=debug traceID=13e7675235c7ed46 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.499035ms" +ts=2024-05-02T12:17:24.601845787Z caller=http.go:194 level=debug traceID=17933cca69e7fbc8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.024678ms" +ts=2024-05-02T12:17:24.601661465Z caller=http.go:194 level=debug traceID=494df078e879033e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.519245ms" +ts=2024-05-02T12:17:24.601306763Z caller=http.go:194 level=debug traceID=5aa9effa4d39cccd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.004996ms" +ts=2024-05-02T12:17:24.599746485Z caller=http.go:194 level=debug traceID=77bdfebd9c8835f4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 253.178µs" +ts=2024-05-02T12:17:24.599312789Z caller=http.go:194 level=debug traceID=0a89ce42a3cd232f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
10.915037ms" +ts=2024-05-02T12:17:24.595875625Z caller=http.go:194 level=debug traceID=17933cca69e7fbc8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.223239ms" +ts=2024-05-02T12:17:24.594938987Z caller=http.go:194 level=debug traceID=21133977e13ede4d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.671487ms" +ts=2024-05-02T12:17:24.594639316Z caller=http.go:194 level=debug traceID=5a5ce4ec66a5affb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.885514ms" +ts=2024-05-02T12:17:24.59456714Z caller=http.go:194 level=debug traceID=7a2808a4b1887051 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 328.249µs" +ts=2024-05-02T12:17:24.593066547Z caller=http.go:194 level=debug traceID=13e7675235c7ed46 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.488064ms" +ts=2024-05-02T12:17:24.592022617Z caller=http.go:194 level=debug traceID=664bc01c411d0ba5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.068986ms" +ts=2024-05-02T12:17:24.591086757Z caller=http.go:194 level=debug traceID=4a2535085a6a520d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.850803ms" +ts=2024-05-02T12:17:24.591069725Z caller=http.go:194 level=debug traceID=1d8690bbb9b0278c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.866835ms" +ts=2024-05-02T12:17:24.590544912Z caller=http.go:194 level=debug traceID=299b3635c206b4b4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.298268ms" +ts=2024-05-02T12:17:24.588675761Z caller=http.go:194 level=debug traceID=77bdfebd9c8835f4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 343.57µs" +ts=2024-05-02T12:17:24.586070877Z caller=http.go:194 level=debug traceID=293cd056d18447f8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.217848ms" +ts=2024-05-02T12:17:24.585980483Z caller=http.go:194 level=debug traceID=41bc9d9ec83396d0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.788281ms" +ts=2024-05-02T12:17:24.585029358Z caller=http.go:194 level=debug traceID=34db6b783d17a397 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.903753ms" +ts=2024-05-02T12:17:24.583506996Z caller=http.go:194 level=debug traceID=7a2808a4b1887051 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 530.827µs" +ts=2024-05-02T12:17:24.583007624Z caller=http.go:194 level=debug traceID=5a5ce4ec66a5affb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.760875ms" +ts=2024-05-02T12:17:24.581225814Z caller=http.go:194 level=debug traceID=664bc01c411d0ba5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.532757ms" +ts=2024-05-02T12:17:24.580025035Z caller=http.go:194 level=debug traceID=1d8690bbb9b0278c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.076982ms" +ts=2024-05-02T12:17:24.577369257Z caller=http.go:194 level=debug traceID=299b3635c206b4b4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.061274ms" +ts=2024-05-02T12:17:24.575404062Z caller=http.go:194 level=debug traceID=34db6b783d17a397 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.239709ms" +ts=2024-05-02T12:17:24.575415101Z caller=http.go:194 level=debug traceID=293cd056d18447f8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.930857ms" +ts=2024-05-02T12:17:24.574068301Z caller=http.go:194 level=debug traceID=41bc9d9ec83396d0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.378053ms" +ts=2024-05-02T12:17:24.573469377Z caller=http.go:194 level=debug traceID=0bb553e79fa879b8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.542454ms" +ts=2024-05-02T12:17:24.570690102Z caller=http.go:194 level=debug 
traceID=7710ea67fe884c4a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.672354ms" +ts=2024-05-02T12:17:24.567246303Z caller=http.go:194 level=debug traceID=0bb553e79fa879b8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.967169ms" +ts=2024-05-02T12:17:24.566875296Z caller=http.go:194 level=debug traceID=433903fcbaad60b0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.960116ms" +ts=2024-05-02T12:17:24.565687058Z caller=http.go:194 level=debug traceID=4bf4d656c83a0598 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.075192ms" +ts=2024-05-02T12:17:24.564703972Z caller=http.go:194 level=debug traceID=7bd6c2f79d817b13 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 300.652µs" +ts=2024-05-02T12:17:24.562628616Z caller=http.go:194 level=debug traceID=653cbfc9015246d2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.775746ms" +ts=2024-05-02T12:17:24.562167601Z caller=http.go:194 level=debug traceID=5f47cb1da73db722 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.69314ms" +ts=2024-05-02T12:17:24.561621807Z caller=http.go:194 level=debug traceID=4e4df18092a48fdf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.90788ms" +ts=2024-05-02T12:17:24.559985104Z caller=http.go:194 level=debug traceID=7710ea67fe884c4a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.798778ms" +ts=2024-05-02T12:17:24.559352907Z caller=http.go:194 level=debug traceID=518e038141d0db5d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.660467ms" +ts=2024-05-02T12:17:24.557585542Z caller=http.go:194 level=debug traceID=6e0b2cc1df079973 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.500216ms" +ts=2024-05-02T12:17:24.555082019Z caller=http.go:194 level=debug traceID=433903fcbaad60b0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.484358ms" +ts=2024-05-02T12:17:24.55434978Z caller=http.go:194 level=debug traceID=4bf4d656c83a0598 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.948468ms" +ts=2024-05-02T12:17:24.553552252Z caller=http.go:194 level=debug traceID=7bd6c2f79d817b13 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 386.746µs" +ts=2024-05-02T12:17:24.553382137Z caller=http.go:194 level=debug traceID=5a2977a75f7232a6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.488243ms" +ts=2024-05-02T12:17:24.553239126Z caller=http.go:194 level=debug traceID=292caea58ccc5a16 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 304.518µs" +ts=2024-05-02T12:17:24.551670054Z caller=http.go:194 level=debug traceID=653cbfc9015246d2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.783351ms" +ts=2024-05-02T12:17:24.551637773Z caller=http.go:194 level=debug traceID=4e4df18092a48fdf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.533864ms" +ts=2024-05-02T12:17:24.551464718Z caller=http.go:194 level=debug traceID=5f47cb1da73db722 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.564543ms" +ts=2024-05-02T12:17:24.550634521Z caller=http.go:194 level=debug traceID=63a7aefe6c66670a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.415552ms" +ts=2024-05-02T12:17:24.550472169Z caller=http.go:194 level=debug traceID=57afe684138660a4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.87225ms" +ts=2024-05-02T12:17:24.550373142Z caller=http.go:194 level=debug traceID=08fac5329611ca63 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 244.455µs" +ts=2024-05-02T12:17:24.549502428Z caller=http.go:194 level=debug traceID=518e038141d0db5d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
1.825598ms" +ts=2024-05-02T12:17:24.546746804Z caller=http.go:194 level=debug traceID=6e0b2cc1df079973 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.754867ms" +ts=2024-05-02T12:17:24.546406442Z caller=http.go:194 level=debug traceID=75c2c4105d83fdd4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.865873ms" +ts=2024-05-02T12:17:24.542495407Z caller=http.go:194 level=debug traceID=5a2977a75f7232a6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.696815ms" +ts=2024-05-02T12:17:24.542337333Z caller=http.go:194 level=debug traceID=71095577a9c0e9b6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.394663ms" +ts=2024-05-02T12:17:24.542131352Z caller=http.go:194 level=debug traceID=67becd117c1528c9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.308625ms" +ts=2024-05-02T12:17:24.5419899Z caller=http.go:194 level=debug traceID=292caea58ccc5a16 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 439.885µs" +ts=2024-05-02T12:17:24.541390068Z caller=http.go:194 level=debug traceID=6ace3a36a4fb2a0f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.711835ms" +ts=2024-05-02T12:17:24.541479882Z caller=http.go:194 level=debug traceID=61be611ccb7e7c17 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.642673ms" +ts=2024-05-02T12:17:24.540376034Z caller=http.go:194 level=debug traceID=63a7aefe6c66670a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.428938ms" +ts=2024-05-02T12:17:24.53912871Z caller=http.go:194 level=debug traceID=57afe684138660a4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.922027ms" +ts=2024-05-02T12:17:24.538957154Z caller=http.go:194 level=debug traceID=08fac5329611ca63 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 415.216µs" +ts=2024-05-02T12:17:24.535962621Z caller=http.go:194 level=debug traceID=75c2c4105d83fdd4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.901498ms" +ts=2024-05-02T12:17:24.535514274Z caller=http.go:194 level=debug traceID=718af9f787a5016e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.665099ms" +ts=2024-05-02T12:17:24.534404206Z caller=http.go:194 level=debug traceID=31959b31aa2202af orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.753012ms" +ts=2024-05-02T12:17:24.533717405Z caller=http.go:194 level=debug traceID=079effad8e6401a7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.211063ms" +ts=2024-05-02T12:17:24.533040798Z caller=http.go:194 level=debug traceID=67becd117c1528c9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.163972ms" +ts=2024-05-02T12:17:24.531554141Z caller=http.go:194 level=debug traceID=125be950304ebe49 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.103341ms" +ts=2024-05-02T12:17:24.531333747Z caller=http.go:194 level=debug traceID=17caf53da37fb6c3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.839816ms" +ts=2024-05-02T12:17:24.530677385Z caller=http.go:194 level=debug traceID=71095577a9c0e9b6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.529227ms" +ts=2024-05-02T12:17:24.530405979Z caller=http.go:194 level=debug traceID=6ace3a36a4fb2a0f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.00192ms" +ts=2024-05-02T12:17:24.529047372Z caller=http.go:194 level=debug traceID=6b9eb7e881d1fa0e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.105898ms" +ts=2024-05-02T12:17:24.528233839Z caller=http.go:194 level=debug traceID=61be611ccb7e7c17 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.69503ms" +ts=2024-05-02T12:17:24.526252368Z caller=http.go:194 level=debug 
traceID=3a19bee90b27a038 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.274093ms" +ts=2024-05-02T12:17:24.525186564Z caller=http.go:194 level=debug traceID=17caf53da37fb6c3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.364842ms" +ts=2024-05-02T12:17:24.524376264Z caller=http.go:194 level=debug traceID=4ca9af7f3157ba5a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.19611ms" +ts=2024-05-02T12:17:24.523688494Z caller=http.go:194 level=debug traceID=31959b31aa2202af orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.16027ms" +ts=2024-05-02T12:17:24.523025745Z caller=http.go:194 level=debug traceID=718af9f787a5016e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.23212ms" +ts=2024-05-02T12:17:24.522548322Z caller=http.go:194 level=debug traceID=597acdbf78515a1b orgID=1819 msg="POST /pyroscope/ingest?aggregationType=sum&from=1714652230&name=flamegraph.com.frontend%7Bapp_kubernetes_io_instance%3Dflamegraph-com%2Capp_kubernetes_io_name%3Dflamegraph-com%2Ccluster%3Dflamegraph.com%2Cinstance%3D10.0.9.115%3A9091%2Cjob%3Dkubernetes-pods%2Cnamespace%3Dflamegraph-com%2Cpod%3Dflamegraph-com-frontend-6fb87f8785-pd87k%2Cpod_template_hash%3D6fb87f8785%2Cpyroscope_tenant%3Dpyroscope%2Ctier%3Dfrontend%7D&sampleRate=0&spyName=scrape&units=samples&until=1714652240 (200) 2.189289ms" +ts=2024-05-02T12:17:24.521731554Z caller=http.go:194 level=debug traceID=2830995e1fc11dd8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.953369ms" +ts=2024-05-02T12:17:24.520906682Z caller=http.go:194 level=debug traceID=079effad8e6401a7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.703937ms" +ts=2024-05-02T12:17:24.520908363Z caller=http.go:194 level=debug traceID=6b335758d9392757 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.431277ms" +ts=2024-05-02T12:17:24.520051106Z caller=http.go:194 level=debug traceID=48b8071dc846a6cb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.437149ms" +ts=2024-05-02T12:17:24.518783968Z caller=http.go:194 level=debug traceID=3a19bee90b27a038 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.003913ms" +ts=2024-05-02T12:17:24.518349349Z caller=http.go:194 level=debug traceID=6b9eb7e881d1fa0e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.082946ms" +ts=2024-05-02T12:17:24.517934523Z caller=http.go:194 level=debug traceID=125be950304ebe49 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.039194ms" +ts=2024-05-02T12:17:24.516433987Z caller=http.go:194 level=debug traceID=0a32cfe929123e2b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.334275ms" +ts=2024-05-02T12:17:24.515327594Z caller=http.go:194 level=debug traceID=72fca79a1d6ae3f4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 331.242µs" +ts=2024-05-02T12:17:24.514239733Z caller=http.go:194 level=debug traceID=00744876b361b6be orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.413212ms" +ts=2024-05-02T12:17:24.514059866Z caller=http.go:194 level=debug traceID=4ca9af7f3157ba5a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.821705ms" +ts=2024-05-02T12:17:24.513155735Z caller=http.go:194 level=debug traceID=2830995e1fc11dd8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.887575ms" +ts=2024-05-02T12:17:24.511253179Z caller=http.go:194 level=debug traceID=006c4bf8bf32104d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.086548ms" +ts=2024-05-02T12:17:24.510526337Z caller=http.go:194 level=debug traceID=48b8071dc846a6cb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.888398ms" 
+ts=2024-05-02T12:17:24.510375758Z caller=http.go:194 level=debug traceID=4da6728d9f9e22c6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.449084ms" +ts=2024-05-02T12:17:24.508937101Z caller=http.go:194 level=debug traceID=425dcef578628b69 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.955443ms" +ts=2024-05-02T12:17:24.508511921Z caller=http.go:194 level=debug traceID=22554d5c55c91e5e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.782732ms" +ts=2024-05-02T12:17:24.507549309Z caller=http.go:194 level=debug traceID=5d5eda75af2f48f4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.245673ms" +ts=2024-05-02T12:17:24.507124558Z caller=http.go:194 level=debug traceID=6b335758d9392757 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.34889ms" +ts=2024-05-02T12:17:24.506748583Z caller=http.go:194 level=debug traceID=0a32cfe929123e2b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.47092ms" +ts=2024-05-02T12:17:24.505176041Z caller=http.go:194 level=debug traceID=696796e3109762b0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.981034ms" +ts=2024-05-02T12:17:24.504794432Z caller=http.go:194 level=debug traceID=72fca79a1d6ae3f4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 398.21µs" +ts=2024-05-02T12:17:24.503358426Z caller=http.go:194 level=debug traceID=6853323d3d79a14e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.585283ms" +ts=2024-05-02T12:17:24.502911002Z caller=http.go:194 level=debug traceID=49df369007a6319c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.549948ms" +ts=2024-05-02T12:17:24.50290346Z caller=http.go:194 level=debug traceID=00744876b361b6be orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.255821ms" +ts=2024-05-02T12:17:24.501535651Z caller=http.go:194 level=debug traceID=006c4bf8bf32104d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.681399ms" +ts=2024-05-02T12:17:24.498813933Z caller=http.go:194 level=debug traceID=3187356d8dfa2d54 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.345044ms" +ts=2024-05-02T12:17:24.498652985Z caller=http.go:194 level=debug traceID=4da6728d9f9e22c6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.323523ms" +ts=2024-05-02T12:17:24.498540672Z caller=http.go:194 level=debug traceID=425dcef578628b69 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.614791ms" +ts=2024-05-02T12:17:24.497321263Z caller=http.go:194 level=debug traceID=22554d5c55c91e5e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.830315ms" +ts=2024-05-02T12:17:24.496530925Z caller=http.go:194 level=debug traceID=479d44eabc90a03c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.580354ms" +ts=2024-05-02T12:17:24.496011593Z caller=http.go:194 level=debug traceID=5d5eda75af2f48f4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.990279ms" +ts=2024-05-02T12:17:24.494841873Z caller=http.go:194 level=debug traceID=696796e3109762b0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.0248ms" +ts=2024-05-02T12:17:24.49395916Z caller=http.go:194 level=debug traceID=6853323d3d79a14e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.322466ms" +ts=2024-05-02T12:17:24.493085216Z caller=http.go:194 level=debug traceID=7cf692aa33891d99 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.575314ms" +ts=2024-05-02T12:17:24.491499501Z caller=http.go:194 level=debug traceID=49df369007a6319c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.388382ms" +ts=2024-05-02T12:17:24.489781303Z caller=http.go:194 level=debug 
traceID=109e1a486a99848b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.529172ms"
+ts=2024-05-02T12:17:24.489844201Z caller=http.go:194 level=debug traceID=582abcc921153720 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.713524ms"
+ts=2024-05-02T12:17:24.488124408Z caller=http.go:194 level=debug traceID=4c77b3fa17f332c6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.387503ms"
[... a long run of further "+" lines of the same bulk sample gateway debug records added by the patch, one record per line in the format: ts=<RFC3339Nano> caller=http.go:194 level=debug traceID=<hex> orgID=<1218 or 3648> msg="POST /push.v1.PusherService/Push (200 or 400) <latency>" ...]
+ts=2024-05-02T12:17:24.222459348Z
caller=http.go:194 level=debug traceID=262dd3dc293fb01b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.751161ms" +ts=2024-05-02T12:17:24.221891787Z caller=http.go:194 level=debug traceID=4c7b813cb21f1043 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.203324ms" +ts=2024-05-02T12:17:24.221586709Z caller=http.go:194 level=debug traceID=29717c7224777624 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.944662ms" +ts=2024-05-02T12:17:24.221737627Z caller=http.go:194 level=debug traceID=74573c23fed58ba9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.202085ms" +ts=2024-05-02T12:17:24.221181415Z caller=http.go:194 level=debug traceID=5051ca03dbc920d8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.152054ms" +ts=2024-05-02T12:17:24.219982051Z caller=http.go:194 level=debug traceID=5667d5d90f438f9a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.561978ms" +ts=2024-05-02T12:17:24.219437791Z caller=http.go:194 level=debug traceID=1f2f3a5bcfb4bdb1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.491041ms" +ts=2024-05-02T12:17:24.21852717Z caller=http.go:194 level=debug traceID=0157b395f2d931a8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.810462ms" +ts=2024-05-02T12:17:24.218485318Z caller=http.go:194 level=debug traceID=0db06340711805d0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.460716ms" +ts=2024-05-02T12:17:24.218412559Z caller=http.go:194 level=debug traceID=5dff524905c7db7b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.288689ms" +ts=2024-05-02T12:17:24.218367991Z caller=http.go:194 level=debug traceID=183212379612db34 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.035865ms" +ts=2024-05-02T12:17:24.217918471Z caller=http.go:194 level=debug traceID=4bc6a097566b87c2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.847043ms" +ts=2024-05-02T12:17:24.216436122Z caller=http.go:194 level=debug traceID=6a9c7bcfedb2e5b0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.83366ms" +ts=2024-05-02T12:17:24.216507081Z caller=http.go:194 level=debug traceID=58a57d60f4acadca orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.664012ms" +ts=2024-05-02T12:17:24.215791064Z caller=http.go:194 level=debug traceID=5d2528fe904f5c43 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.466876ms" +ts=2024-05-02T12:17:24.215696822Z caller=http.go:194 level=debug traceID=5a9865b474f274c7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.56924ms" +ts=2024-05-02T12:17:24.215720365Z caller=http.go:194 level=debug traceID=7102aee22ec638b4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.071135ms" +ts=2024-05-02T12:17:24.215001347Z caller=http.go:194 level=debug traceID=189c07b1b782caea orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.312847ms" +ts=2024-05-02T12:17:24.214589759Z caller=http.go:194 level=debug traceID=700018ab703eef1b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 246.936µs" +ts=2024-05-02T12:17:24.213109965Z caller=http.go:194 level=debug traceID=6b608d64431ea3dc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.978298ms" +ts=2024-05-02T12:17:24.213196401Z caller=http.go:194 level=debug traceID=7efaaf55d0a2b92d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.890504ms" +ts=2024-05-02T12:17:24.213163958Z caller=http.go:194 level=debug traceID=13b5cf4dd6cce688 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.293891ms" +ts=2024-05-02T12:17:24.21304036Z caller=http.go:194 level=debug traceID=1ddb2f4a4c91f431 orgID=1218 msg="POST 
/push.v1.PusherService/Push (200) 1.990599ms" +ts=2024-05-02T12:17:24.212439907Z caller=http.go:194 level=debug traceID=30de5975494581cd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.787486ms" +ts=2024-05-02T12:17:24.211792689Z caller=http.go:194 level=debug traceID=38519d87e18760f8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.840353ms" +ts=2024-05-02T12:17:24.211595813Z caller=http.go:194 level=debug traceID=78af71e6ce6ac1ec orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.955303ms" +ts=2024-05-02T12:17:24.211481016Z caller=http.go:194 level=debug traceID=44d7da3211669774 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 280.83µs" +ts=2024-05-02T12:17:24.210647683Z caller=http.go:194 level=debug traceID=262dd3dc293fb01b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.63266ms" +ts=2024-05-02T12:17:24.209751364Z caller=http.go:194 level=debug traceID=354d10dcab9dca3c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.887413ms" +ts=2024-05-02T12:17:24.209515978Z caller=http.go:194 level=debug traceID=03ac5f8eb2d7b491 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.470835ms" +ts=2024-05-02T12:17:24.209205882Z caller=http.go:194 level=debug traceID=3a55d2a3cbc2d004 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.131113ms" +ts=2024-05-02T12:17:24.208933697Z caller=http.go:194 level=debug traceID=183212379612db34 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.409325ms" +ts=2024-05-02T12:17:24.208773683Z caller=http.go:194 level=debug traceID=4bc6a097566b87c2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.955704ms" +ts=2024-05-02T12:17:24.207394759Z caller=http.go:194 level=debug traceID=182afae4949a6c62 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.481469ms" +ts=2024-05-02T12:17:24.207259235Z caller=http.go:194 level=debug traceID=0db06340711805d0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.73634ms" +ts=2024-05-02T12:17:24.206436613Z caller=http.go:194 level=debug traceID=5608b37ead388daa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.51821ms" +ts=2024-05-02T12:17:24.206181516Z caller=http.go:194 level=debug traceID=3243847d82a44476 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.089661ms" +ts=2024-05-02T12:17:24.205126463Z caller=http.go:194 level=debug traceID=38fd4fc3b6dae99c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.166661ms" +ts=2024-05-02T12:17:24.20497232Z caller=http.go:194 level=debug traceID=0c30ec50fec3faaf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.682384ms" +ts=2024-05-02T12:17:24.205081311Z caller=http.go:194 level=debug traceID=189c07b1b782caea orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.441074ms" +ts=2024-05-02T12:17:24.204861091Z caller=http.go:194 level=debug traceID=03ac5f8eb2d7b491 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.168797ms" +ts=2024-05-02T12:17:24.204485592Z caller=http.go:194 level=debug traceID=4339e162403155e7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.438117ms" +ts=2024-05-02T12:17:24.203764319Z caller=http.go:194 level=debug traceID=5d2528fe904f5c43 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.16377ms" +ts=2024-05-02T12:17:24.203557085Z caller=http.go:194 level=debug traceID=700018ab703eef1b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 362.948µs" +ts=2024-05-02T12:17:24.203408658Z caller=http.go:194 level=debug traceID=5a9865b474f274c7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.905987ms" +ts=2024-05-02T12:17:24.202945777Z 
caller=http.go:194 level=debug traceID=71041cd7b27607c5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.743274ms" +ts=2024-05-02T12:17:24.202772345Z caller=http.go:194 level=debug traceID=279c3f3929d32548 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 22.37284ms" +ts=2024-05-02T12:17:24.202864244Z caller=http.go:194 level=debug traceID=01b525c26b91f426 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.1619ms" +ts=2024-05-02T12:17:24.201983567Z caller=http.go:194 level=debug traceID=44d7da3211669774 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 483.117µs" +ts=2024-05-02T12:17:24.201855142Z caller=http.go:194 level=debug traceID=6727623c0313bee3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.528663ms" +ts=2024-05-02T12:17:24.201732277Z caller=http.go:194 level=debug traceID=41bff8663ed3cf2b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 327.365µs" +ts=2024-05-02T12:17:24.201372829Z caller=http.go:194 level=debug traceID=0197caf4bdcc88a4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.418587ms" +ts=2024-05-02T12:17:24.200412326Z caller=http.go:194 level=debug traceID=30de5975494581cd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.013489ms" +ts=2024-05-02T12:17:24.200307494Z caller=http.go:194 level=debug traceID=354d10dcab9dca3c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.673228ms" +ts=2024-05-02T12:17:24.200278002Z caller=http.go:194 level=debug traceID=78af71e6ce6ac1ec orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.376457ms" +ts=2024-05-02T12:17:24.199825617Z caller=http.go:194 level=debug traceID=77ed198bcefbf89a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.066414ms" +ts=2024-05-02T12:17:24.199888355Z caller=http.go:194 level=debug traceID=38519d87e18760f8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.109579ms" +ts=2024-05-02T12:17:24.199397293Z caller=http.go:194 level=debug traceID=525a5bcfc5c04043 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.630659ms" +ts=2024-05-02T12:17:24.198991064Z caller=http.go:194 level=debug traceID=4b8660a131226549 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.151279ms" +ts=2024-05-02T12:17:24.198248478Z caller=http.go:194 level=debug traceID=659afe0c0aa24a29 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.27278ms" +ts=2024-05-02T12:17:24.19758135Z caller=http.go:194 level=debug traceID=3a55d2a3cbc2d004 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.916245ms" +ts=2024-05-02T12:17:24.196960318Z caller=http.go:194 level=debug traceID=182afae4949a6c62 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.450498ms" +ts=2024-05-02T12:17:24.195163014Z caller=http.go:194 level=debug traceID=41bff8663ed3cf2b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 351.35µs" +ts=2024-05-02T12:17:24.194901082Z caller=http.go:194 level=debug traceID=38fd4fc3b6dae99c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.947046ms" +ts=2024-05-02T12:17:24.1948973Z caller=http.go:194 level=debug traceID=79b75dddd2a892bf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.38363ms" +ts=2024-05-02T12:17:24.194602012Z caller=http.go:194 level=debug traceID=778c38f1e27f7f36 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.723232ms" +ts=2024-05-02T12:17:24.194525442Z caller=http.go:194 level=debug traceID=3243847d82a44476 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.801415ms" +ts=2024-05-02T12:17:24.194482261Z caller=http.go:194 level=debug traceID=5608b37ead388daa orgID=1218 msg="POST 
/push.v1.PusherService/Push (200) 1.83281ms" +ts=2024-05-02T12:17:24.194449435Z caller=http.go:194 level=debug traceID=4339e162403155e7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.872288ms" +ts=2024-05-02T12:17:24.194361492Z caller=http.go:194 level=debug traceID=4168ee93e7a6c068 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 265.247µs" +ts=2024-05-02T12:17:24.192917213Z caller=http.go:194 level=debug traceID=0f5067af5516848c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.720991ms" +ts=2024-05-02T12:17:24.192604223Z caller=http.go:194 level=debug traceID=01b525c26b91f426 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.01221ms" +ts=2024-05-02T12:17:24.192176662Z caller=http.go:194 level=debug traceID=505e3c2793f115d9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.83427ms" +ts=2024-05-02T12:17:24.19138226Z caller=http.go:194 level=debug traceID=62877ecfba1a64f1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.277153ms" +ts=2024-05-02T12:17:24.191209485Z caller=http.go:194 level=debug traceID=0c30ec50fec3faaf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.761596ms" +ts=2024-05-02T12:17:24.190691632Z caller=http.go:194 level=debug traceID=71041cd7b27607c5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.128469ms" +ts=2024-05-02T12:17:24.190247166Z caller=http.go:194 level=debug traceID=0197caf4bdcc88a4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.198094ms" +ts=2024-05-02T12:17:24.1902765Z caller=http.go:194 level=debug traceID=0e2d2aa69a5e3175 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.27018ms" +ts=2024-05-02T12:17:24.189807092Z caller=http.go:194 level=debug traceID=2a262ba75a5eb20d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.658075ms" +ts=2024-05-02T12:17:24.1897398Z caller=http.go:194 level=debug traceID=6727623c0313bee3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.155837ms" +ts=2024-05-02T12:17:24.189473509Z caller=http.go:194 level=debug traceID=525a5bcfc5c04043 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.327824ms" +ts=2024-05-02T12:17:24.188433151Z caller=http.go:194 level=debug traceID=7da7484eb711d53a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.800803ms" +ts=2024-05-02T12:17:24.188112501Z caller=http.go:194 level=debug traceID=45a8965ed5489781 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.641188ms" +ts=2024-05-02T12:17:24.188027238Z caller=http.go:194 level=debug traceID=4b8660a131226549 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.438692ms" +ts=2024-05-02T12:17:24.186649798Z caller=http.go:194 level=debug traceID=659afe0c0aa24a29 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.250074ms" +ts=2024-05-02T12:17:24.185850307Z caller=http.go:194 level=debug traceID=77ed198bcefbf89a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.775105ms" +ts=2024-05-02T12:17:24.18566243Z caller=http.go:194 level=debug traceID=37890238a10553aa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.090301ms" +ts=2024-05-02T12:17:24.185386075Z caller=http.go:194 level=debug traceID=79b75dddd2a892bf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.206535ms" +ts=2024-05-02T12:17:24.185015759Z caller=http.go:194 level=debug traceID=279c3f3929d32548 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 16.336362ms" +ts=2024-05-02T12:17:24.184234768Z caller=http.go:194 level=debug traceID=30d06b291c42ca74 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.620561ms" +ts=2024-05-02T12:17:24.183258268Z 
caller=http.go:194 level=debug traceID=4168ee93e7a6c068 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 445.729µs" +ts=2024-05-02T12:17:24.182740739Z caller=http.go:194 level=debug traceID=111de5953ceb4150 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.759636ms" +ts=2024-05-02T12:17:24.182858523Z caller=http.go:194 level=debug traceID=41313afa85a29280 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.689815ms" +ts=2024-05-02T12:17:24.182414452Z caller=http.go:194 level=debug traceID=0b4140976818b8e7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.598298ms" +ts=2024-05-02T12:17:24.182304012Z caller=http.go:194 level=debug traceID=505e3c2793f115d9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.012552ms" +ts=2024-05-02T12:17:24.18186909Z caller=http.go:194 level=debug traceID=1cc6c63e03a08621 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.181477ms" +ts=2024-05-02T12:17:24.181114914Z caller=http.go:194 level=debug traceID=778c38f1e27f7f36 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.120227ms" +ts=2024-05-02T12:17:24.181013407Z caller=http.go:194 level=debug traceID=0f5067af5516848c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.526175ms" +ts=2024-05-02T12:17:24.180933733Z caller=http.go:194 level=debug traceID=463e1748e39a45b5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.654664ms" +ts=2024-05-02T12:17:24.180443086Z caller=http.go:194 level=debug traceID=62877ecfba1a64f1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.027519ms" +ts=2024-05-02T12:17:24.179498105Z caller=http.go:194 level=debug traceID=09a04cafccb96ac9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.618274ms" +ts=2024-05-02T12:17:24.179175486Z caller=http.go:194 level=debug traceID=0e2d2aa69a5e3175 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.919623ms" +ts=2024-05-02T12:17:24.178154915Z caller=http.go:194 level=debug traceID=2a262ba75a5eb20d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.578543ms" +ts=2024-05-02T12:17:24.177682871Z caller=http.go:194 level=debug traceID=45a8965ed5489781 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.159632ms" +ts=2024-05-02T12:17:24.176792197Z caller=http.go:194 level=debug traceID=7da7484eb711d53a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.713129ms" +ts=2024-05-02T12:17:24.176845711Z caller=http.go:194 level=debug traceID=03fe68768eaf93e8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.648741ms" +ts=2024-05-02T12:17:24.176447988Z caller=http.go:194 level=debug traceID=4de8aa9b91018901 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.517154ms" +ts=2024-05-02T12:17:24.176372433Z caller=http.go:194 level=debug traceID=170548fd3533aaf0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.657937ms" +ts=2024-05-02T12:17:24.176028338Z caller=http.go:194 level=debug traceID=297b5a149dc20a3a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.840615ms" +ts=2024-05-02T12:17:24.174387676Z caller=http.go:194 level=debug traceID=30d06b291c42ca74 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.525855ms" +ts=2024-05-02T12:17:24.174258615Z caller=http.go:194 level=debug traceID=01b6311206dafe51 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.090516ms" +ts=2024-05-02T12:17:24.174008317Z caller=http.go:194 level=debug traceID=37890238a10553aa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.672882ms" +ts=2024-05-02T12:17:24.173760297Z caller=http.go:194 level=debug traceID=7c37fa99939a7bed orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 354.479µs" +ts=2024-05-02T12:17:24.173444388Z caller=http.go:194 level=debug traceID=19d1ad4f91a2102f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.670593ms" +ts=2024-05-02T12:17:24.17324662Z caller=http.go:194 level=debug traceID=41313afa85a29280 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.943219ms" +ts=2024-05-02T12:17:24.17317855Z caller=http.go:194 level=debug traceID=21f6ea17f9ffd75b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.467742ms" +ts=2024-05-02T12:17:24.172435264Z caller=http.go:194 level=debug traceID=364c5ff88d4377cc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.617212ms" +ts=2024-05-02T12:17:24.172220756Z caller=http.go:194 level=debug traceID=26bde94b96005f4a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.571449ms" +ts=2024-05-02T12:17:24.172178248Z caller=http.go:194 level=debug traceID=0b4140976818b8e7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.446687ms" +ts=2024-05-02T12:17:24.170883724Z caller=http.go:194 level=debug traceID=111de5953ceb4150 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.626463ms" +ts=2024-05-02T12:17:24.169912403Z caller=http.go:194 level=debug traceID=01b6311206dafe51 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.460929ms" +ts=2024-05-02T12:17:24.169786817Z caller=http.go:194 level=debug traceID=463e1748e39a45b5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.286494ms" +ts=2024-05-02T12:17:24.169662821Z caller=http.go:194 level=debug traceID=1cc6c63e03a08621 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.952539ms" +ts=2024-05-02T12:17:24.169443232Z caller=http.go:194 level=debug traceID=7e9d8e6d54eac78f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.229597ms" +ts=2024-05-02T12:17:24.16859915Z caller=http.go:194 level=debug traceID=342c7ec1a3436674 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.610401ms" +ts=2024-05-02T12:17:24.168371962Z caller=http.go:194 level=debug traceID=09a04cafccb96ac9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.680444ms" +ts=2024-05-02T12:17:24.167802941Z caller=http.go:194 level=debug traceID=2d3d6dbdc83b898a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.342563ms" +ts=2024-05-02T12:17:24.167829874Z caller=http.go:194 level=debug traceID=3d393194af2cbda6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.051514ms" +ts=2024-05-02T12:17:24.16714778Z caller=http.go:194 level=debug traceID=54fb4558696bdf19 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.106492ms" +ts=2024-05-02T12:17:24.166730081Z caller=http.go:194 level=debug traceID=1f4ce215e11b0aab orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.430965ms" +ts=2024-05-02T12:17:24.166315369Z caller=http.go:194 level=debug traceID=4de8aa9b91018901 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.153135ms" +ts=2024-05-02T12:17:24.165813152Z caller=http.go:194 level=debug traceID=16ea7ec13da43a6d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.893256ms" +ts=2024-05-02T12:17:24.16537563Z caller=http.go:194 level=debug traceID=49e0974f48eb9bb7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.188087ms" +ts=2024-05-02T12:17:24.165681482Z caller=http.go:194 level=debug traceID=03fe68768eaf93e8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.557043ms" +ts=2024-05-02T12:17:24.165273202Z caller=http.go:194 level=debug traceID=5eab4973a6ce7d75 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.888155ms" +ts=2024-05-02T12:17:24.165013438Z 
caller=http.go:194 level=debug traceID=19d1ad4f91a2102f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.178317ms" +ts=2024-05-02T12:17:24.164841066Z caller=http.go:194 level=debug traceID=170548fd3533aaf0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.654145ms" +ts=2024-05-02T12:17:24.164151306Z caller=http.go:194 level=debug traceID=297b5a149dc20a3a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.132696ms" +ts=2024-05-02T12:17:24.163770187Z caller=http.go:194 level=debug traceID=1ea349bfd3ab098a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.267746ms" +ts=2024-05-02T12:17:24.163763576Z caller=http.go:194 level=debug traceID=1f8779198a8bff96 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.247794ms" +ts=2024-05-02T12:17:24.163084443Z caller=http.go:194 level=debug traceID=26bde94b96005f4a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.907666ms" +ts=2024-05-02T12:17:24.163174253Z caller=http.go:194 level=debug traceID=21f6ea17f9ffd75b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.641513ms" +ts=2024-05-02T12:17:24.162515514Z caller=http.go:194 level=debug traceID=65ad208f53787ddc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.668428ms" +ts=2024-05-02T12:17:24.162263058Z caller=http.go:194 level=debug traceID=7c37fa99939a7bed orgID=1218 msg="POST /push.v1.PusherService/Push (200) 434.335µs" +ts=2024-05-02T12:17:24.162093469Z caller=http.go:194 level=debug traceID=60302a5236c026d7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.39003ms" +ts=2024-05-02T12:17:24.161340379Z caller=http.go:194 level=debug traceID=364c5ff88d4377cc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.638761ms" +ts=2024-05-02T12:17:24.160992666Z caller=http.go:194 level=debug traceID=338eb74976db90d4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.294307ms" +ts=2024-05-02T12:17:24.15960175Z caller=http.go:194 level=debug traceID=5dc9880a2613cae9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.826772ms" +ts=2024-05-02T12:17:24.158122685Z caller=http.go:194 level=debug traceID=76b373b480e42744 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.155066ms" +ts=2024-05-02T12:17:24.157964148Z caller=http.go:194 level=debug traceID=342c7ec1a3436674 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.294193ms" +ts=2024-05-02T12:17:24.157939194Z caller=http.go:194 level=debug traceID=7e9d8e6d54eac78f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.864697ms" +ts=2024-05-02T12:17:24.157695794Z caller=http.go:194 level=debug traceID=1f4ce215e11b0aab orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.935017ms" +ts=2024-05-02T12:17:24.157572853Z caller=http.go:194 level=debug traceID=16ea7ec13da43a6d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.741196ms" +ts=2024-05-02T12:17:24.157173582Z caller=http.go:194 level=debug traceID=3d393194af2cbda6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.34949ms" +ts=2024-05-02T12:17:24.15719611Z caller=http.go:194 level=debug traceID=6a5253e6c4c99aa2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.894023ms" +ts=2024-05-02T12:17:24.156379951Z caller=http.go:194 level=debug traceID=0c4b3d68bd8793e6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.009297ms" +ts=2024-05-02T12:17:24.155970043Z caller=http.go:194 level=debug traceID=49e0974f48eb9bb7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.981378ms" +ts=2024-05-02T12:17:24.15592791Z caller=http.go:194 level=debug traceID=38db7b33a5c5d567 orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 5.841144ms" +ts=2024-05-02T12:17:24.155650707Z caller=http.go:194 level=debug traceID=54fb4558696bdf19 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.792134ms" +ts=2024-05-02T12:17:24.155621336Z caller=http.go:194 level=debug traceID=2ce177b3407b1ca3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.187713ms" +ts=2024-05-02T12:17:24.154907914Z caller=http.go:194 level=debug traceID=244ae46ae8ba5e21 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.659071ms" +ts=2024-05-02T12:17:24.154558033Z caller=http.go:194 level=debug traceID=2d3d6dbdc83b898a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.773529ms" +ts=2024-05-02T12:17:24.154430972Z caller=http.go:194 level=debug traceID=5eab4973a6ce7d75 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.37713ms" +ts=2024-05-02T12:17:24.153949973Z caller=http.go:194 level=debug traceID=1f8779198a8bff96 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.001402ms" +ts=2024-05-02T12:17:24.153810124Z caller=http.go:194 level=debug traceID=5dc9880a2613cae9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.117115ms" +ts=2024-05-02T12:17:24.153044923Z caller=http.go:194 level=debug traceID=3784baf0eed07c03 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.478838ms" +ts=2024-05-02T12:17:24.15183769Z caller=http.go:194 level=debug traceID=60854429a8788f25 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.208457ms" +ts=2024-05-02T12:17:24.151662493Z caller=http.go:194 level=debug traceID=65ad208f53787ddc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.386272ms" +ts=2024-05-02T12:17:24.151547698Z caller=http.go:194 level=debug traceID=2ecb0c57f32ae8c3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.639622ms" +ts=2024-05-02T12:17:24.150685224Z caller=http.go:194 level=debug traceID=60302a5236c026d7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.612091ms" +ts=2024-05-02T12:17:24.15011388Z caller=http.go:194 level=debug traceID=1ea349bfd3ab098a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.9649ms" +ts=2024-05-02T12:17:24.149601102Z caller=http.go:194 level=debug traceID=4b597f39b3a3ed78 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.861465ms" +ts=2024-05-02T12:17:24.14853809Z caller=http.go:194 level=debug traceID=338eb74976db90d4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.130584ms" +ts=2024-05-02T12:17:24.148280319Z caller=http.go:194 level=debug traceID=0ffb2673812b3cf3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.942447ms" +ts=2024-05-02T12:17:24.14746657Z caller=http.go:194 level=debug traceID=3ded81ddf1265747 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.311227ms" +ts=2024-05-02T12:17:24.146855201Z caller=http.go:194 level=debug traceID=38db7b33a5c5d567 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.171859ms" +ts=2024-05-02T12:17:24.146411215Z caller=http.go:194 level=debug traceID=76b373b480e42744 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.137242ms" +ts=2024-05-02T12:17:24.146101548Z caller=http.go:194 level=debug traceID=0c4b3d68bd8793e6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.998471ms" +ts=2024-05-02T12:17:24.146002824Z caller=http.go:194 level=debug traceID=318c990ebacdfff4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.60771ms" +ts=2024-05-02T12:17:24.145793827Z caller=http.go:194 level=debug traceID=6a5253e6c4c99aa2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.657831ms" +ts=2024-05-02T12:17:24.145552349Z 
caller=http.go:194 level=debug traceID=52d4b93d8e51d0e5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.599639ms" +ts=2024-05-02T12:17:24.144984998Z caller=http.go:194 level=debug traceID=2ce177b3407b1ca3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.000401ms" +ts=2024-05-02T12:17:24.144879778Z caller=http.go:194 level=debug traceID=1fb640d92efe1761 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.387906ms" +ts=2024-05-02T12:17:24.144510286Z caller=http.go:194 level=debug traceID=09a74094d21a06eb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.24859ms" +ts=2024-05-02T12:17:24.143950232Z caller=http.go:194 level=debug traceID=5f7b477c492707d8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.806925ms" +ts=2024-05-02T12:17:24.143449033Z caller=http.go:194 level=debug traceID=244ae46ae8ba5e21 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.540745ms" +ts=2024-05-02T12:17:24.143201841Z caller=http.go:194 level=debug traceID=53a75beeb9109625 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.465222ms" +ts=2024-05-02T12:17:24.142983678Z caller=http.go:194 level=debug traceID=0cd953f5be85e236 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.456576ms" +ts=2024-05-02T12:17:24.141931109Z caller=http.go:194 level=debug traceID=3784baf0eed07c03 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.834622ms" +ts=2024-05-02T12:17:24.141120601Z caller=http.go:194 level=debug traceID=60854429a8788f25 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.870227ms" +ts=2024-05-02T12:17:24.140615511Z caller=http.go:194 level=debug traceID=2ecb0c57f32ae8c3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.979926ms" +ts=2024-05-02T12:17:24.139361706Z caller=http.go:194 level=debug traceID=4b597f39b3a3ed78 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.603564ms" +ts=2024-05-02T12:17:24.138474704Z caller=http.go:194 level=debug traceID=075d33286f1f145a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.129254ms" +ts=2024-05-02T12:17:24.13826991Z caller=http.go:194 level=debug traceID=3ded81ddf1265747 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.152966ms" +ts=2024-05-02T12:17:24.137549777Z caller=http.go:194 level=debug traceID=0ffb2673812b3cf3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.81868ms" +ts=2024-05-02T12:17:24.137260864Z caller=http.go:194 level=debug traceID=318c990ebacdfff4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.309973ms" +ts=2024-05-02T12:17:24.136768558Z caller=http.go:194 level=debug traceID=65c93678c05d2068 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.342ms" +ts=2024-05-02T12:17:24.136739956Z caller=http.go:194 level=debug traceID=25b66f71d35449e1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.803822ms" +ts=2024-05-02T12:17:24.136544349Z caller=http.go:194 level=debug traceID=6d8a0756828ecc96 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.972879ms" +ts=2024-05-02T12:17:24.136269591Z caller=http.go:194 level=debug traceID=7ddbdfb0c5beb4fa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.304563ms" +ts=2024-05-02T12:17:24.135534452Z caller=http.go:194 level=debug traceID=13f088874b1bef36 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 314.778µs" +ts=2024-05-02T12:17:24.134795139Z caller=http.go:194 level=debug traceID=52d4b93d8e51d0e5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.659415ms" +ts=2024-05-02T12:17:24.134801781Z caller=http.go:194 level=debug traceID=1fd68abd1e49641c orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 1.532147ms" +ts=2024-05-02T12:17:24.134772702Z caller=http.go:194 level=debug traceID=1fb640d92efe1761 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.690528ms" +ts=2024-05-02T12:17:24.133564587Z caller=http.go:194 level=debug traceID=14826d86a7b8fd04 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 48.120839ms" +ts=2024-05-02T12:17:24.133106401Z caller=http.go:194 level=debug traceID=7633926805d07b01 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 232.861µs" +ts=2024-05-02T12:17:24.132642444Z caller=http.go:194 level=debug traceID=7108f6aa459efdd6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.358782ms" +ts=2024-05-02T12:17:24.132464548Z caller=http.go:194 level=debug traceID=53a75beeb9109625 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.558533ms" +ts=2024-05-02T12:17:24.132197487Z caller=http.go:194 level=debug traceID=5f7b477c492707d8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.036602ms" +ts=2024-05-02T12:17:24.131905618Z caller=http.go:194 level=debug traceID=09a74094d21a06eb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.686481ms" +ts=2024-05-02T12:17:24.131599678Z caller=http.go:194 level=debug traceID=0cd953f5be85e236 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.132086ms" +ts=2024-05-02T12:17:24.131247793Z caller=http.go:194 level=debug traceID=426cf919bde0abe9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.097577ms" +ts=2024-05-02T12:17:24.130517433Z caller=http.go:194 level=debug traceID=44222c041f02c2c6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.821955ms" +ts=2024-05-02T12:17:24.130410148Z caller=http.go:194 level=debug traceID=075d33286f1f145a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.672264ms" +ts=2024-05-02T12:17:24.128939865Z caller=http.go:194 level=debug traceID=1a958dd3de158eb7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.304752ms" +ts=2024-05-02T12:17:24.12890336Z caller=http.go:194 level=debug traceID=7ddbdfb0c5beb4fa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.199869ms" +ts=2024-05-02T12:17:24.126994308Z caller=http.go:194 level=debug traceID=47636695cdc1b790 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.687654ms" +ts=2024-05-02T12:17:24.12657399Z caller=http.go:194 level=debug traceID=50d3763ee9ed6fd4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.503751ms" +ts=2024-05-02T12:17:24.126409029Z caller=http.go:194 level=debug traceID=283176ee9e882784 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.158817ms" +ts=2024-05-02T12:17:24.125954589Z caller=http.go:194 level=debug traceID=25b66f71d35449e1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.933554ms" +ts=2024-05-02T12:17:24.125406049Z caller=http.go:194 level=debug traceID=65c93678c05d2068 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.458495ms" +ts=2024-05-02T12:17:24.124941398Z caller=http.go:194 level=debug traceID=6d8a0756828ecc96 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.142349ms" +ts=2024-05-02T12:17:24.124169027Z caller=http.go:194 level=debug traceID=3810c6d7e0062635 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.584488ms" +ts=2024-05-02T12:17:24.124072278Z caller=http.go:194 level=debug traceID=13f088874b1bef36 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 311.574µs" +ts=2024-05-02T12:17:24.123967455Z caller=http.go:194 level=debug traceID=5645e7739896355b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.804963ms" +ts=2024-05-02T12:17:24.123978731Z 
caller=http.go:194 level=debug traceID=1fd68abd1e49641c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.1117ms" +ts=2024-05-02T12:17:24.12327818Z caller=http.go:194 level=debug traceID=7d7abf17c821882f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.357543ms" +ts=2024-05-02T12:17:24.123032157Z caller=http.go:194 level=debug traceID=63172d2db41bbe93 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.921693ms" +ts=2024-05-02T12:17:24.122331389Z caller=http.go:194 level=debug traceID=1204b0383e6d041f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.776925ms" +ts=2024-05-02T12:17:24.122285496Z caller=http.go:194 level=debug traceID=7633926805d07b01 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 514.781µs" +ts=2024-05-02T12:17:24.121168998Z caller=http.go:194 level=debug traceID=426cf919bde0abe9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.668449ms" +ts=2024-05-02T12:17:24.121172633Z caller=http.go:194 level=debug traceID=7108f6aa459efdd6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.312769ms" +ts=2024-05-02T12:17:24.120749376Z caller=http.go:194 level=debug traceID=14881f30c0491c55 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.061228ms" +ts=2024-05-02T12:17:24.120246217Z caller=http.go:194 level=debug traceID=768dda10525d960c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.027143ms" +ts=2024-05-02T12:17:24.119458476Z caller=http.go:194 level=debug traceID=44222c041f02c2c6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.946216ms" +ts=2024-05-02T12:17:24.11847724Z caller=http.go:194 level=debug traceID=7df09b3dba83e96c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.382947ms" +ts=2024-05-02T12:17:24.118318714Z caller=http.go:194 level=debug traceID=6365d3db496db614 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 283.593µs" +ts=2024-05-02T12:17:24.118049622Z caller=http.go:194 level=debug traceID=1a958dd3de158eb7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.74742ms" +ts=2024-05-02T12:17:24.117651047Z caller=http.go:194 level=debug traceID=3dda40ecfd8fe20a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.555805ms" +ts=2024-05-02T12:17:24.116781274Z caller=http.go:194 level=debug traceID=7f4be7b688d80fcf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.33394ms" +ts=2024-05-02T12:17:24.116679941Z caller=http.go:194 level=debug traceID=50d3763ee9ed6fd4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.797663ms" +ts=2024-05-02T12:17:24.116576847Z caller=http.go:194 level=debug traceID=2bf9fa7531ef1f4c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.860991ms" +ts=2024-05-02T12:17:24.116195194Z caller=http.go:194 level=debug traceID=47636695cdc1b790 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.312777ms" +ts=2024-05-02T12:17:24.11565144Z caller=http.go:194 level=debug traceID=50d9723cb5b1200b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.867933ms" +ts=2024-05-02T12:17:24.115137931Z caller=http.go:194 level=debug traceID=283176ee9e882784 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.04715ms" +ts=2024-05-02T12:17:24.115023822Z caller=http.go:194 level=debug traceID=67f103659b44346b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.692444ms" +ts=2024-05-02T12:17:24.114606256Z caller=http.go:194 level=debug traceID=29417e4fc06fd561 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 320.457µs" +ts=2024-05-02T12:17:24.114534384Z caller=http.go:194 level=debug traceID=474c2b554cd87a4b orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 2.703334ms" +ts=2024-05-02T12:17:24.114430578Z caller=http.go:194 level=debug traceID=33ab47d37a114e6e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.020753ms" +ts=2024-05-02T12:17:24.113488131Z caller=http.go:194 level=debug traceID=4d8d682855e60ffe orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.661799ms" +ts=2024-05-02T12:17:24.113267496Z caller=http.go:194 level=debug traceID=4645f71be503e4cd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.053101ms" +ts=2024-05-02T12:17:24.112801714Z caller=http.go:194 level=debug traceID=5645e7739896355b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.817502ms" +ts=2024-05-02T12:17:24.112629794Z caller=http.go:194 level=debug traceID=4394b7ab6c9b6668 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.907978ms" +ts=2024-05-02T12:17:24.112372217Z caller=http.go:194 level=debug traceID=3810c6d7e0062635 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.144582ms" +ts=2024-05-02T12:17:24.112150637Z caller=http.go:194 level=debug traceID=7d7abf17c821882f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.261857ms" +ts=2024-05-02T12:17:24.111916608Z caller=http.go:194 level=debug traceID=1204b0383e6d041f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.686713ms" +ts=2024-05-02T12:17:24.111813606Z caller=http.go:194 level=debug traceID=7f4be7b688d80fcf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.046795ms" +ts=2024-05-02T12:17:24.111436791Z caller=http.go:194 level=debug traceID=63172d2db41bbe93 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.656477ms" +ts=2024-05-02T12:17:24.110948486Z caller=http.go:194 level=debug traceID=768dda10525d960c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.684582ms" +ts=2024-05-02T12:17:24.109485096Z caller=http.go:194 level=debug traceID=14881f30c0491c55 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.51564ms" +ts=2024-05-02T12:17:24.108634466Z caller=http.go:194 level=debug traceID=3e828dbb13c85945 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.325451ms" +ts=2024-05-02T12:17:24.10831326Z caller=http.go:194 level=debug traceID=6365d3db496db614 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 377.601µs" +ts=2024-05-02T12:17:24.108307016Z caller=http.go:194 level=debug traceID=7aea63df3a8f1c75 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.359601ms" +ts=2024-05-02T12:17:24.106988639Z caller=http.go:194 level=debug traceID=7df09b3dba83e96c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.741372ms" +ts=2024-05-02T12:17:24.106762264Z caller=http.go:194 level=debug traceID=0657752ce2a76372 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.231486ms" +ts=2024-05-02T12:17:24.106132192Z caller=http.go:194 level=debug traceID=3dda40ecfd8fe20a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.78351ms" +ts=2024-05-02T12:17:24.106091476Z caller=http.go:194 level=debug traceID=1115defb209a4e5c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.015734ms" +ts=2024-05-02T12:17:24.105460085Z caller=http.go:194 level=debug traceID=2f897c0e13771f81 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.165359ms" +ts=2024-05-02T12:17:24.10481968Z caller=http.go:194 level=debug traceID=22f22d3d52fd5043 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.167098ms" +ts=2024-05-02T12:17:24.104578699Z caller=http.go:194 level=debug traceID=50d9723cb5b1200b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.473973ms" +ts=2024-05-02T12:17:24.104421944Z 
caller=http.go:194 level=debug traceID=29708a7e34138b4b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.464739ms" +ts=2024-05-02T12:17:24.104090019Z caller=http.go:194 level=debug traceID=29417e4fc06fd561 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 423.403µs" +ts=2024-05-02T12:17:24.103962353Z caller=http.go:194 level=debug traceID=474c2b554cd87a4b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.817483ms" +ts=2024-05-02T12:17:24.10383547Z caller=http.go:194 level=debug traceID=2bf9fa7531ef1f4c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.631223ms" +ts=2024-05-02T12:17:24.103725265Z caller=http.go:194 level=debug traceID=33ab47d37a114e6e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.530512ms" +ts=2024-05-02T12:17:24.103653796Z caller=http.go:194 level=debug traceID=67f103659b44346b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.807093ms" +ts=2024-05-02T12:17:24.102766836Z caller=http.go:194 level=debug traceID=43007dffdf367728 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.774555ms" +ts=2024-05-02T12:17:24.102494774Z caller=http.go:194 level=debug traceID=4394b7ab6c9b6668 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.771691ms" +ts=2024-05-02T12:17:24.102028542Z caller=http.go:194 level=debug traceID=4645f71be503e4cd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.906396ms" +ts=2024-05-02T12:17:24.101821506Z caller=http.go:194 level=debug traceID=4d8d682855e60ffe orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.12355ms" +ts=2024-05-02T12:17:24.101298519Z caller=http.go:194 level=debug traceID=0dcf0ad922f8f255 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.774519ms" +ts=2024-05-02T12:17:24.101244736Z caller=http.go:194 level=debug traceID=759c2429448fcd5f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.129636ms" +ts=2024-05-02T12:17:24.098586564Z caller=http.go:194 level=debug traceID=7aea63df3a8f1c75 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.714288ms" +ts=2024-05-02T12:17:24.098111245Z caller=http.go:194 level=debug traceID=610f57c6ce63f53e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.17976ms" +ts=2024-05-02T12:17:24.097620762Z caller=http.go:194 level=debug traceID=3e828dbb13c85945 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.103375ms" +ts=2024-05-02T12:17:24.097307375Z caller=http.go:194 level=debug traceID=1793cc08cbb9aba9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.512465ms" +ts=2024-05-02T12:17:24.096993149Z caller=http.go:194 level=debug traceID=6ec7ac7a7449edf2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.114047ms" +ts=2024-05-02T12:17:24.096898595Z caller=http.go:194 level=debug traceID=5a06762a0d3ba0d4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.901861ms" +ts=2024-05-02T12:17:24.096931771Z caller=http.go:194 level=debug traceID=197431d41a875003 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.477652ms" +ts=2024-05-02T12:17:24.096187112Z caller=http.go:194 level=debug traceID=44b2d7946cb33626 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.658539ms" +ts=2024-05-02T12:17:24.095990412Z caller=http.go:194 level=debug traceID=26e8c505da9c521d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.973305ms" +ts=2024-05-02T12:17:24.095369125Z caller=http.go:194 level=debug traceID=382e0186e635ad16 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.470214ms" +ts=2024-05-02T12:17:24.095200032Z caller=http.go:194 level=debug traceID=6b43e1b5bab99869 orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 2.927506ms" +ts=2024-05-02T12:17:24.094847776Z caller=http.go:194 level=debug traceID=1115defb209a4e5c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.629031ms" +ts=2024-05-02T12:17:24.094477669Z caller=http.go:194 level=debug traceID=2191f9f7d82bb014 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.40918ms" +ts=2024-05-02T12:17:24.094320459Z caller=http.go:194 level=debug traceID=29708a7e34138b4b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.582566ms" +ts=2024-05-02T12:17:24.094335816Z caller=http.go:194 level=debug traceID=02449b8e967e550b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.68786ms" +ts=2024-05-02T12:17:24.094264366Z caller=http.go:194 level=debug traceID=0657752ce2a76372 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.715959ms" +ts=2024-05-02T12:17:24.094193019Z caller=http.go:194 level=debug traceID=102d5bd176f38c90 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.507045ms" +ts=2024-05-02T12:17:24.093720531Z caller=http.go:194 level=debug traceID=2f897c0e13771f81 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.757721ms" +ts=2024-05-02T12:17:24.093200264Z caller=http.go:194 level=debug traceID=22f22d3d52fd5043 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.802804ms" +ts=2024-05-02T12:17:24.093055095Z caller=http.go:194 level=debug traceID=14826d86a7b8fd04 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 19.346987ms" +ts=2024-05-02T12:17:24.092538643Z caller=http.go:194 level=debug traceID=6c01212e55baddbd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.573554ms" +ts=2024-05-02T12:17:24.092260001Z caller=http.go:194 level=debug traceID=0e106d54918072dc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.24079ms" +ts=2024-05-02T12:17:24.091214144Z caller=http.go:194 level=debug traceID=43007dffdf367728 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.511918ms" +ts=2024-05-02T12:17:24.089930402Z caller=http.go:194 level=debug traceID=0dcf0ad922f8f255 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.589076ms" +ts=2024-05-02T12:17:24.088891869Z caller=http.go:194 level=debug traceID=4a220515ea8171b8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.853368ms" +ts=2024-05-02T12:17:24.088800236Z caller=http.go:194 level=debug traceID=759c2429448fcd5f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.170988ms" +ts=2024-05-02T12:17:24.087450457Z caller=http.go:194 level=debug traceID=1ff4fdca05e5d4b3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.538423ms" +ts=2024-05-02T12:17:24.08721657Z caller=http.go:194 level=debug traceID=610f57c6ce63f53e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.525679ms" +ts=2024-05-02T12:17:24.086952206Z caller=http.go:194 level=debug traceID=0028060710fb1099 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.463583ms" +ts=2024-05-02T12:17:24.086855101Z caller=http.go:194 level=debug traceID=7ab09461c44a1f72 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.999856ms" +ts=2024-05-02T12:17:24.086413683Z caller=http.go:194 level=debug traceID=197431d41a875003 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.604917ms" +ts=2024-05-02T12:17:24.086134296Z caller=http.go:194 level=debug traceID=1793cc08cbb9aba9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.090494ms" +ts=2024-05-02T12:17:24.086116086Z caller=http.go:194 level=debug traceID=6ec7ac7a7449edf2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.394107ms" +ts=2024-05-02T12:17:24.085900884Z 
caller=http.go:194 level=debug traceID=5fda6ec14aace2be orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.726286ms" +ts=2024-05-02T12:17:24.085784431Z caller=http.go:194 level=debug traceID=33419a044b1259cc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.565662ms" +ts=2024-05-02T12:17:24.085847763Z caller=http.go:194 level=debug traceID=44b2d7946cb33626 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.51307ms" +ts=2024-05-02T12:17:24.085664744Z caller=http.go:194 level=debug traceID=23ffe13b4b51df36 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.138421ms" +ts=2024-05-02T12:17:24.085644114Z caller=http.go:194 level=debug traceID=5a06762a0d3ba0d4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.582922ms" +ts=2024-05-02T12:17:24.084677605Z caller=http.go:194 level=debug traceID=6b43e1b5bab99869 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.488638ms" +ts=2024-05-02T12:17:24.084031166Z caller=http.go:194 level=debug traceID=120292ab1dfa0b61 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.883106ms" +ts=2024-05-02T12:17:24.0836778Z caller=http.go:194 level=debug traceID=3180513b7fc82bb1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 267.134µs" +ts=2024-05-02T12:17:24.083541862Z caller=http.go:194 level=debug traceID=2191f9f7d82bb014 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.029722ms" +ts=2024-05-02T12:17:24.083474089Z caller=http.go:194 level=debug traceID=02449b8e967e550b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.829682ms" +ts=2024-05-02T12:17:24.083052965Z caller=http.go:194 level=debug traceID=102d5bd176f38c90 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.979142ms" +ts=2024-05-02T12:17:24.083026978Z caller=http.go:194 level=debug traceID=6c01212e55baddbd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.953104ms" +ts=2024-05-02T12:17:24.082483299Z caller=http.go:194 level=debug traceID=118c28c9fbbef782 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.076146ms" +ts=2024-05-02T12:17:24.081946239Z caller=http.go:194 level=debug traceID=0e106d54918072dc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.041911ms" +ts=2024-05-02T12:17:24.081834824Z caller=http.go:194 level=debug traceID=26e8c505da9c521d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.074798ms" +ts=2024-05-02T12:17:24.081565404Z caller=http.go:194 level=debug traceID=2af9a99841d0ea12 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.349718ms" +ts=2024-05-02T12:17:24.081048902Z caller=http.go:194 level=debug traceID=382e0186e635ad16 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.347675ms" +ts=2024-05-02T12:17:24.080008843Z caller=http.go:194 level=debug traceID=624262b4607014a7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 271.599µs" +ts=2024-05-02T12:17:24.079984399Z caller=http.go:194 level=debug traceID=73d038cc70ca9aa6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.210617ms" +ts=2024-05-02T12:17:24.079831799Z caller=http.go:194 level=debug traceID=6e075d10f30436a2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.567694ms" +ts=2024-05-02T12:17:24.078970727Z caller=http.go:194 level=debug traceID=70dc82e5828f7ec9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.154438ms" +ts=2024-05-02T12:17:24.078756527Z caller=http.go:194 level=debug traceID=2af9a99841d0ea12 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.67855ms" +ts=2024-05-02T12:17:24.078183974Z caller=http.go:194 level=debug traceID=4a220515ea8171b8 orgID=1218 msg="POST 
/push.v1.PusherService/Push (200) 2.135385ms" +ts=2024-05-02T12:17:24.078053819Z caller=http.go:194 level=debug traceID=1ff4fdca05e5d4b3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.260378ms" +ts=2024-05-02T12:17:24.077910834Z caller=http.go:194 level=debug traceID=44bfafc548f96937 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.710409ms" +ts=2024-05-02T12:17:24.077558353Z caller=http.go:194 level=debug traceID=7532e2ddc3ce0d07 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.423796ms" +ts=2024-05-02T12:17:24.076809521Z caller=http.go:194 level=debug traceID=7ea2868281f5a452 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.722413ms" +ts=2024-05-02T12:17:24.076053241Z caller=http.go:194 level=debug traceID=41b3d4da0bfdcd85 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.60388ms" +ts=2024-05-02T12:17:24.075658281Z caller=http.go:194 level=debug traceID=2e2a440ac7ef84af orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.482418ms" +ts=2024-05-02T12:17:24.075564782Z caller=http.go:194 level=debug traceID=7ab09461c44a1f72 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.074426ms" +ts=2024-05-02T12:17:24.075598151Z caller=http.go:194 level=debug traceID=0028060710fb1099 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.149316ms" +ts=2024-05-02T12:17:24.075479659Z caller=http.go:194 level=debug traceID=5fda6ec14aace2be orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.044509ms" +ts=2024-05-02T12:17:24.075141092Z caller=http.go:194 level=debug traceID=0921cd894ffab2c3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.657742ms" +ts=2024-05-02T12:17:24.074876437Z caller=http.go:194 level=debug traceID=23ffe13b4b51df36 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.767643ms" +ts=2024-05-02T12:17:24.074587527Z caller=http.go:194 level=debug traceID=120292ab1dfa0b61 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.622487ms" +ts=2024-05-02T12:17:24.074244947Z caller=http.go:194 level=debug traceID=33419a044b1259cc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.540124ms" +ts=2024-05-02T12:17:24.073837289Z caller=http.go:194 level=debug traceID=0b2a92c7df7528a7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.664652ms" +ts=2024-05-02T12:17:24.073362832Z caller=http.go:194 level=debug traceID=3180513b7fc82bb1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 557.422µs" +ts=2024-05-02T12:17:24.072637534Z caller=http.go:194 level=debug traceID=69cad74fe941adc6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.993838ms" +ts=2024-05-02T12:17:24.072447478Z caller=http.go:194 level=debug traceID=386def9d6c5a27ff orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.714201ms" +ts=2024-05-02T12:17:24.071607358Z caller=http.go:194 level=debug traceID=377b0581eee1ef07 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.101415ms" +ts=2024-05-02T12:17:24.069986514Z caller=http.go:194 level=debug traceID=118c28c9fbbef782 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.684343ms" +ts=2024-05-02T12:17:24.069663499Z caller=http.go:194 level=debug traceID=6e075d10f30436a2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.455832ms" +ts=2024-05-02T12:17:24.069429144Z caller=http.go:194 level=debug traceID=571895906c6a1b16 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.071227ms" +ts=2024-05-02T12:17:24.069313776Z caller=http.go:194 level=debug traceID=6be8094560bed321 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.968519ms" +ts=2024-05-02T12:17:24.069060859Z 
caller=http.go:194 level=debug traceID=624262b4607014a7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 407.958µs" +ts=2024-05-02T12:17:24.069053295Z caller=http.go:194 level=debug traceID=73d038cc70ca9aa6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.580657ms" +ts=2024-05-02T12:17:24.069102332Z caller=http.go:194 level=debug traceID=6ed26b3410c6776a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.756407ms" +ts=2024-05-02T12:17:24.06857273Z caller=http.go:194 level=debug traceID=41b3d4da0bfdcd85 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.189666ms" +ts=2024-05-02T12:17:24.068294125Z caller=http.go:194 level=debug traceID=19d9aa8dc31d1dbd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.660814ms" +ts=2024-05-02T12:17:24.067904731Z caller=http.go:194 level=debug traceID=5be903e49737abad orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.475176ms" +ts=2024-05-02T12:17:24.067543948Z caller=http.go:194 level=debug traceID=70dc82e5828f7ec9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.362949ms" +ts=2024-05-02T12:17:24.066796253Z caller=http.go:194 level=debug traceID=2e2a440ac7ef84af orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.410792ms" +ts=2024-05-02T12:17:24.066565866Z caller=http.go:194 level=debug traceID=1d8e69c44474c5b7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.336931ms" +ts=2024-05-02T12:17:24.06563026Z caller=http.go:194 level=debug traceID=7ea2868281f5a452 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.909358ms" +ts=2024-05-02T12:17:24.065318572Z caller=http.go:194 level=debug traceID=44bfafc548f96937 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.694123ms" +ts=2024-05-02T12:17:24.065261757Z caller=http.go:194 level=debug traceID=7532e2ddc3ce0d07 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.733311ms" +ts=2024-05-02T12:17:24.064732634Z caller=http.go:194 level=debug traceID=747ce5ee611c1e8f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.753351ms" +ts=2024-05-02T12:17:24.064455324Z caller=http.go:194 level=debug traceID=0b2a92c7df7528a7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.803306ms" +ts=2024-05-02T12:17:24.06442789Z caller=http.go:194 level=debug traceID=2142727ec8947af8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.616544ms" +ts=2024-05-02T12:17:24.064392507Z caller=http.go:194 level=debug traceID=0921cd894ffab2c3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.467062ms" +ts=2024-05-02T12:17:24.064236457Z caller=http.go:194 level=debug traceID=0570c2d677600e94 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.100862ms" +ts=2024-05-02T12:17:24.062695749Z caller=http.go:194 level=debug traceID=3f8a6f7cfc23f425 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 48.069108ms" +ts=2024-05-02T12:17:24.062170306Z caller=http.go:194 level=debug traceID=75ace44bd21dfc9c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.837298ms" +ts=2024-05-02T12:17:24.06211284Z caller=http.go:194 level=debug traceID=386def9d6c5a27ff orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.869322ms" +ts=2024-05-02T12:17:24.061926958Z caller=http.go:194 level=debug traceID=59e1e0eef14a5ec6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.689566ms" +ts=2024-05-02T12:17:24.061685669Z caller=http.go:194 level=debug traceID=69cad74fe941adc6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.893352ms" +ts=2024-05-02T12:17:24.061369793Z caller=http.go:194 level=debug traceID=39969e4e16ce4aa7 orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 2.965159ms" +ts=2024-05-02T12:17:24.060800835Z caller=http.go:194 level=debug traceID=6d595c80b4c12376 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.36977ms" +ts=2024-05-02T12:17:24.06056211Z caller=http.go:194 level=debug traceID=377b0581eee1ef07 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.25378ms" +ts=2024-05-02T12:17:24.060017173Z caller=http.go:194 level=debug traceID=7b4c90c3df7e5bfa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.323732ms" +ts=2024-05-02T12:17:24.059365007Z caller=http.go:194 level=debug traceID=6be8094560bed321 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.058681ms" +ts=2024-05-02T12:17:24.058915834Z caller=http.go:194 level=debug traceID=6ed26b3410c6776a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.672954ms" +ts=2024-05-02T12:17:24.058716911Z caller=http.go:194 level=debug traceID=4e4013b8bf2de93b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.32288ms" +ts=2024-05-02T12:17:24.058513566Z caller=http.go:194 level=debug traceID=571895906c6a1b16 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.223313ms" +ts=2024-05-02T12:17:24.057898161Z caller=http.go:194 level=debug traceID=5be903e49737abad orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.817703ms" +ts=2024-05-02T12:17:24.057652916Z caller=http.go:194 level=debug traceID=541adc04c4894c5f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.770382ms" +ts=2024-05-02T12:17:24.057460309Z caller=http.go:194 level=debug traceID=19d9aa8dc31d1dbd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.376018ms" +ts=2024-05-02T12:17:24.056305858Z caller=http.go:194 level=debug traceID=42b2f3146e15eb83 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.707404ms" +ts=2024-05-02T12:17:24.055760899Z caller=http.go:194 level=debug traceID=1d8e69c44474c5b7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.689748ms" +ts=2024-05-02T12:17:24.055741983Z caller=http.go:194 level=debug traceID=126c09978ee21fdc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.269762ms" +ts=2024-05-02T12:17:24.055635276Z caller=http.go:194 level=debug traceID=0ebc104eb3278dbb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.553519ms" +ts=2024-05-02T12:17:24.055640853Z caller=http.go:194 level=debug traceID=3c08c7e198165694 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.005977ms" +ts=2024-05-02T12:17:24.053692196Z caller=http.go:194 level=debug traceID=747ce5ee611c1e8f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.945461ms" +ts=2024-05-02T12:17:24.052871709Z caller=http.go:194 level=debug traceID=0570c2d677600e94 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.202799ms" +ts=2024-05-02T12:17:24.052823352Z caller=http.go:194 level=debug traceID=3b114cec92b7e3f8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.240361ms" +ts=2024-05-02T12:17:24.052779919Z caller=http.go:194 level=debug traceID=04c6892f5670237f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.661014ms" +ts=2024-05-02T12:17:24.051923233Z caller=http.go:194 level=debug traceID=75ace44bd21dfc9c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.545777ms" +ts=2024-05-02T12:17:24.051887275Z caller=http.go:194 level=debug traceID=2142727ec8947af8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.670464ms" +ts=2024-05-02T12:17:24.051070851Z caller=http.go:194 level=debug traceID=38b128757bf00da6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.170731ms" +ts=2024-05-02T12:17:24.050889668Z 
caller=http.go:194 level=debug traceID=687c0c1628e6bf2c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.322449ms" +ts=2024-05-02T12:17:24.05065156Z caller=http.go:194 level=debug traceID=308955d58d68e89d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.954707ms" +ts=2024-05-02T12:17:24.049624117Z caller=http.go:194 level=debug traceID=39969e4e16ce4aa7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.584664ms" +ts=2024-05-02T12:17:24.049404302Z caller=http.go:194 level=debug traceID=6d595c80b4c12376 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.163605ms" +ts=2024-05-02T12:17:24.049466604Z caller=http.go:194 level=debug traceID=59e1e0eef14a5ec6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.164944ms" +ts=2024-05-02T12:17:24.048923514Z caller=http.go:194 level=debug traceID=38f05aa8d9b0bb73 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.393451ms" +ts=2024-05-02T12:17:24.048132707Z caller=http.go:194 level=debug traceID=6191080dfe6ce703 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.121834ms" +ts=2024-05-02T12:17:24.048164984Z caller=http.go:194 level=debug traceID=4de6945d754d28a6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.661323ms" +ts=2024-05-02T12:17:24.048082195Z caller=http.go:194 level=debug traceID=7b4c90c3df7e5bfa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.871305ms" +ts=2024-05-02T12:17:24.047810896Z caller=http.go:194 level=debug traceID=541adc04c4894c5f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.250692ms" +ts=2024-05-02T12:17:24.047657887Z caller=http.go:194 level=debug traceID=4ce2df10258df311 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.107492ms" +ts=2024-05-02T12:17:24.047464974Z caller=http.go:194 level=debug traceID=31b1ef428b982a37 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.841961ms" +ts=2024-05-02T12:17:24.046856689Z caller=http.go:194 level=debug traceID=4e4013b8bf2de93b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.69514ms" +ts=2024-05-02T12:17:24.046819272Z caller=http.go:194 level=debug traceID=0c2dc2ed9cab9b18 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.238194ms" +ts=2024-05-02T12:17:24.046135673Z caller=http.go:194 level=debug traceID=12d8227196b4c7f9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.402732ms" +ts=2024-05-02T12:17:24.04612084Z caller=http.go:194 level=debug traceID=0c2dc2ed9cab9b18 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 32.184796ms" +ts=2024-05-02T12:17:24.045872701Z caller=http.go:194 level=debug traceID=57c7da0e611a9d1c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.174418ms" +ts=2024-05-02T12:17:24.045820188Z caller=http.go:194 level=debug traceID=42b2f3146e15eb83 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.700719ms" +ts=2024-05-02T12:17:24.045009736Z caller=http.go:194 level=debug traceID=3c08c7e198165694 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.869174ms" +ts=2024-05-02T12:17:24.044767089Z caller=http.go:194 level=debug traceID=0ebc104eb3278dbb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.610144ms" +ts=2024-05-02T12:17:24.04334068Z caller=http.go:194 level=debug traceID=126c09978ee21fdc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.614914ms" +ts=2024-05-02T12:17:24.041815133Z caller=http.go:194 level=debug traceID=5ca250bd0c1065b3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.983428ms" +ts=2024-05-02T12:17:24.041873788Z caller=http.go:194 level=debug traceID=649038e29701556d orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 2.992642ms" +ts=2024-05-02T12:17:24.04085583Z caller=http.go:194 level=debug traceID=04c6892f5670237f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.170073ms" +ts=2024-05-02T12:17:24.040293604Z caller=http.go:194 level=debug traceID=3585b97f20eb39c9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.404316ms" +ts=2024-05-02T12:17:24.040447749Z caller=http.go:194 level=debug traceID=46a66ebbe3434a3d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.942286ms" +ts=2024-05-02T12:17:24.040377519Z caller=http.go:194 level=debug traceID=3b114cec92b7e3f8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.628435ms" +ts=2024-05-02T12:17:24.040319615Z caller=http.go:194 level=debug traceID=38b128757bf00da6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.276739ms" +ts=2024-05-02T12:17:24.04020228Z caller=http.go:194 level=debug traceID=687c0c1628e6bf2c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.012527ms" +ts=2024-05-02T12:17:24.040088941Z caller=http.go:194 level=debug traceID=08d3d8eb11439082 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.731072ms" +ts=2024-05-02T12:17:24.039753797Z caller=http.go:194 level=debug traceID=308955d58d68e89d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.38924ms" +ts=2024-05-02T12:17:24.038164677Z caller=http.go:194 level=debug traceID=57c7da0e611a9d1c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.368291ms" +ts=2024-05-02T12:17:24.038045028Z caller=http.go:194 level=debug traceID=38f05aa8d9b0bb73 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.581624ms" +ts=2024-05-02T12:17:24.037872814Z caller=http.go:194 level=debug traceID=11e8637e7065f223 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.709113ms" +ts=2024-05-02T12:17:24.037805817Z caller=http.go:194 level=debug traceID=4de6945d754d28a6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.620294ms" +ts=2024-05-02T12:17:24.037240917Z caller=http.go:194 level=debug traceID=4ce2df10258df311 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.917246ms" +ts=2024-05-02T12:17:24.036599678Z caller=http.go:194 level=debug traceID=1cfbf77bdeee46d2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.910245ms" +ts=2024-05-02T12:17:24.03648246Z caller=http.go:194 level=debug traceID=3e98e2709eeb1dfa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.050279ms" +ts=2024-05-02T12:17:24.036385077Z caller=http.go:194 level=debug traceID=31b1ef428b982a37 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.845464ms" +ts=2024-05-02T12:17:24.036288657Z caller=http.go:194 level=debug traceID=6191080dfe6ce703 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.966075ms" +ts=2024-05-02T12:17:24.036239289Z caller=http.go:194 level=debug traceID=3069284e99c72765 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.637463ms" +ts=2024-05-02T12:17:24.036208473Z caller=http.go:194 level=debug traceID=7012de3c3299d1b8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.771722ms" +ts=2024-05-02T12:17:24.033068205Z caller=http.go:194 level=debug traceID=58bc60de933eec15 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.129878ms" +ts=2024-05-02T12:17:24.032417321Z caller=http.go:194 level=debug traceID=44d64bea141453e7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.292559ms" +ts=2024-05-02T12:17:24.032156405Z caller=http.go:194 level=debug traceID=649038e29701556d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.582864ms" +ts=2024-05-02T12:17:24.03202858Z 
caller=http.go:194 level=debug traceID=51cc75eee75d33e6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.713565ms" +ts=2024-05-02T12:17:24.031204011Z caller=http.go:194 level=debug traceID=5ca250bd0c1065b3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.635198ms" +ts=2024-05-02T12:17:24.030812783Z caller=http.go:194 level=debug traceID=12d8227196b4c7f9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.561979ms" +ts=2024-05-02T12:17:24.030623199Z caller=http.go:194 level=debug traceID=0bcf089085e4e866 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.478187ms" +ts=2024-05-02T12:17:24.030202634Z caller=http.go:194 level=debug traceID=6fd43624ea4d8665 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.373792ms" +ts=2024-05-02T12:17:24.029595907Z caller=http.go:194 level=debug traceID=46a66ebbe3434a3d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.253981ms" +ts=2024-05-02T12:17:24.029470157Z caller=http.go:194 level=debug traceID=08d3d8eb11439082 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.233451ms" +ts=2024-05-02T12:17:24.029211579Z caller=http.go:194 level=debug traceID=32a7dd07cad3c46f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.187333ms" +ts=2024-05-02T12:17:24.028898206Z caller=http.go:194 level=debug traceID=3069284e99c72765 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.04598ms" +ts=2024-05-02T12:17:24.02880975Z caller=http.go:194 level=debug traceID=3585b97f20eb39c9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.331963ms" +ts=2024-05-02T12:17:24.028619038Z caller=http.go:194 level=debug traceID=11e8637e7065f223 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.425292ms" +ts=2024-05-02T12:17:24.028071125Z caller=http.go:194 level=debug traceID=6f20eafa3cd8708f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.43086ms" +ts=2024-05-02T12:17:24.027640704Z caller=http.go:194 level=debug traceID=3e98e2709eeb1dfa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.460246ms" +ts=2024-05-02T12:17:24.026106044Z caller=http.go:194 level=debug traceID=2afac9383bc4c542 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 314.644µs" +ts=2024-05-02T12:17:24.02545085Z caller=http.go:194 level=debug traceID=731482c3c7f02800 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 18.849956ms" +ts=2024-05-02T12:17:24.025251511Z caller=http.go:194 level=debug traceID=1cfbf77bdeee46d2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.830013ms" +ts=2024-05-02T12:17:24.024937144Z caller=http.go:194 level=debug traceID=07dc1030ad78d203 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.827856ms" +ts=2024-05-02T12:17:24.024574301Z caller=http.go:194 level=debug traceID=14c527626363bcf3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.301573ms" +ts=2024-05-02T12:17:24.024414773Z caller=http.go:194 level=debug traceID=7012de3c3299d1b8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 28.054368ms" +ts=2024-05-02T12:17:24.024019291Z caller=http.go:194 level=debug traceID=3f8a6f7cfc23f425 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 21.166143ms" +ts=2024-05-02T12:17:24.023813565Z caller=http.go:194 level=debug traceID=389425582e62c70b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.293475ms" +ts=2024-05-02T12:17:24.0223918Z caller=http.go:194 level=debug traceID=58bc60de933eec15 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.530298ms" +ts=2024-05-02T12:17:24.02193632Z caller=http.go:194 level=debug traceID=671caddef4f5d898 orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 3.182038ms" +ts=2024-05-02T12:17:24.022006367Z caller=http.go:194 level=debug traceID=79c4dc4f40f41814 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.009077ms" +ts=2024-05-02T12:17:24.021925819Z caller=http.go:194 level=debug traceID=2a007a714e00c318 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.732334ms" +ts=2024-05-02T12:17:24.021038433Z caller=http.go:194 level=debug traceID=44d64bea141453e7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.226413ms" +ts=2024-05-02T12:17:24.020871819Z caller=http.go:194 level=debug traceID=42b4bdb97fd62f06 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.211754ms" +ts=2024-05-02T12:17:24.020648102Z caller=http.go:194 level=debug traceID=51cc75eee75d33e6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.748586ms" +ts=2024-05-02T12:17:24.020088638Z caller=http.go:194 level=debug traceID=0bcf089085e4e866 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.900985ms" +ts=2024-05-02T12:17:24.019718922Z caller=http.go:194 level=debug traceID=32a7dd07cad3c46f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.692767ms" +ts=2024-05-02T12:17:24.018886198Z caller=http.go:194 level=debug traceID=6f20eafa3cd8708f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.209013ms" +ts=2024-05-02T12:17:24.0184116Z caller=http.go:194 level=debug traceID=6fd43624ea4d8665 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.2735ms" +ts=2024-05-02T12:17:24.017315041Z caller=http.go:194 level=debug traceID=47ac8c75640fc463 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.071662ms" +ts=2024-05-02T12:17:24.01590237Z caller=http.go:194 level=debug traceID=2afac9383bc4c542 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 259.26µs" +ts=2024-05-02T12:17:24.014755475Z caller=http.go:194 level=debug traceID=2e2ea65641db835b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.181189ms" +ts=2024-05-02T12:17:24.014310881Z caller=http.go:194 level=debug traceID=4aecdbe8cf5d37f4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.770561ms" +ts=2024-05-02T12:17:24.013809322Z caller=http.go:194 level=debug traceID=0ae9a3066a270840 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.48187ms" +ts=2024-05-02T12:17:24.01325496Z caller=http.go:194 level=debug traceID=3f1ca06638c0d052 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.574149ms" +ts=2024-05-02T12:17:24.01313278Z caller=http.go:194 level=debug traceID=07dc1030ad78d203 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.823508ms" +ts=2024-05-02T12:17:24.013096928Z caller=http.go:194 level=debug traceID=14c527626363bcf3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.416062ms" +ts=2024-05-02T12:17:24.012044348Z caller=http.go:194 level=debug traceID=389425582e62c70b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.533657ms" +ts=2024-05-02T12:17:24.011786594Z caller=http.go:194 level=debug traceID=2a007a714e00c318 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.987232ms" +ts=2024-05-02T12:17:24.01162003Z caller=http.go:194 level=debug traceID=42b4bdb97fd62f06 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.819104ms" +ts=2024-05-02T12:17:24.011451014Z caller=http.go:194 level=debug traceID=79c4dc4f40f41814 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.508446ms" +ts=2024-05-02T12:17:24.010768305Z caller=http.go:194 level=debug traceID=67b72a3b49f58a56 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.543429ms" +ts=2024-05-02T12:17:24.009989236Z 
caller=http.go:194 level=debug traceID=671caddef4f5d898 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.643076ms" +ts=2024-05-02T12:17:24.009995074Z caller=http.go:194 level=debug traceID=41d5913f92fa2d03 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.767853ms" +ts=2024-05-02T12:17:24.009619259Z caller=http.go:194 level=debug traceID=6e11f9589b9d0205 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.849823ms" +ts=2024-05-02T12:17:24.009429681Z caller=http.go:194 level=debug traceID=2c9b9cb9ade1f983 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.384964ms" +ts=2024-05-02T12:17:24.009370782Z caller=http.go:194 level=debug traceID=731482c3c7f02800 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.15888ms" +ts=2024-05-02T12:17:24.006675203Z caller=http.go:194 level=debug traceID=1edd8531e868bc54 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.75032ms" +ts=2024-05-02T12:17:24.006532551Z caller=http.go:194 level=debug traceID=47ac8c75640fc463 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.602719ms" +ts=2024-05-02T12:17:24.006279502Z caller=http.go:194 level=debug traceID=3c0f816cbe1076fb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.445671ms" +ts=2024-05-02T12:17:24.004680693Z caller=http.go:194 level=debug traceID=6c31700a196d9f4b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.988936ms" +ts=2024-05-02T12:17:24.004579335Z caller=http.go:194 level=debug traceID=362e9c7e40872efd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.783736ms" +ts=2024-05-02T12:17:24.004606546Z caller=http.go:194 level=debug traceID=4aecdbe8cf5d37f4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.955265ms" +ts=2024-05-02T12:17:24.003840472Z caller=http.go:194 level=debug traceID=2e2ea65641db835b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.728365ms" +ts=2024-05-02T12:17:24.002888111Z caller=http.go:194 level=debug traceID=0ae9a3066a270840 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.71663ms" +ts=2024-05-02T12:17:24.002796869Z caller=http.go:194 level=debug traceID=5b9a913a5ef1c197 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.531489ms" +ts=2024-05-02T12:17:24.002421451Z caller=http.go:194 level=debug traceID=3f1ca06638c0d052 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.717352ms" +ts=2024-05-02T12:17:24.001586036Z caller=http.go:194 level=debug traceID=691e38e30183b92d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.526025ms" +ts=2024-05-02T12:17:24.000875975Z caller=http.go:194 level=debug traceID=67b72a3b49f58a56 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.932966ms" +ts=2024-05-02T12:17:24.00067863Z caller=http.go:194 level=debug traceID=01633fa2bb55bf54 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.83227ms" +ts=2024-05-02T12:17:24.000634897Z caller=http.go:194 level=debug traceID=41d5913f92fa2d03 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.19753ms" +ts=2024-05-02T12:17:23.999551199Z caller=http.go:194 level=debug traceID=6e11f9589b9d0205 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.815457ms" +ts=2024-05-02T12:17:23.998646897Z caller=http.go:194 level=debug traceID=0bab9cb1f1d81847 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.764812ms" +ts=2024-05-02T12:17:23.997686531Z caller=http.go:194 level=debug traceID=2c9b9cb9ade1f983 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.881119ms" +ts=2024-05-02T12:17:23.997024227Z caller=http.go:194 level=debug traceID=7be438c77363ad4c orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 1.766499ms" +ts=2024-05-02T12:17:23.996891463Z caller=http.go:194 level=debug traceID=1edd8531e868bc54 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.652787ms" +ts=2024-05-02T12:17:23.995951239Z caller=http.go:194 level=debug traceID=351848bad19cbcea orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.385137ms" +ts=2024-05-02T12:17:23.995488845Z caller=http.go:194 level=debug traceID=5e8e00ef9b747fe6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.667257ms" +ts=2024-05-02T12:17:23.995041655Z caller=http.go:194 level=debug traceID=3c0f816cbe1076fb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.429348ms" +ts=2024-05-02T12:17:23.994840011Z caller=http.go:194 level=debug traceID=4ee984ba2b437cf4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.684094ms" +ts=2024-05-02T12:17:23.994838388Z caller=http.go:194 level=debug traceID=362e9c7e40872efd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.197538ms" +ts=2024-05-02T12:17:23.99425484Z caller=http.go:194 level=debug traceID=38c696619025670f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.956675ms" +ts=2024-05-02T12:17:23.993169408Z caller=http.go:194 level=debug traceID=6c31700a196d9f4b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.665232ms" +ts=2024-05-02T12:17:23.991544886Z caller=http.go:194 level=debug traceID=691e38e30183b92d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.403296ms" +ts=2024-05-02T12:17:23.991522479Z caller=http.go:194 level=debug traceID=11d77e025d05258b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.37211ms" +ts=2024-05-02T12:17:23.991126117Z caller=http.go:194 level=debug traceID=5b9a913a5ef1c197 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.895731ms" +ts=2024-05-02T12:17:23.989759075Z caller=http.go:194 level=debug traceID=01633fa2bb55bf54 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.108981ms" +ts=2024-05-02T12:17:23.988797681Z caller=http.go:194 level=debug traceID=09e7b429b8b1ece0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.903135ms" +ts=2024-05-02T12:17:23.988244442Z caller=http.go:194 level=debug traceID=47b5dcd0e06d00d9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.626727ms" +ts=2024-05-02T12:17:23.986435664Z caller=http.go:194 level=debug traceID=4ee984ba2b437cf4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.996709ms" +ts=2024-05-02T12:17:23.986377129Z caller=http.go:194 level=debug traceID=75ee556d646f2a11 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.147401ms" +ts=2024-05-02T12:17:23.986120929Z caller=http.go:194 level=debug traceID=7be438c77363ad4c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.203223ms" +ts=2024-05-02T12:17:23.985653358Z caller=http.go:194 level=debug traceID=0bab9cb1f1d81847 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.673992ms" +ts=2024-05-02T12:17:23.985689867Z caller=http.go:194 level=debug traceID=544bbd0a41494588 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.999022ms" +ts=2024-05-02T12:17:23.984421931Z caller=http.go:194 level=debug traceID=351848bad19cbcea orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.16454ms" +ts=2024-05-02T12:17:23.983332643Z caller=http.go:194 level=debug traceID=379cce12fd4e4f29 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.939397ms" +ts=2024-05-02T12:17:23.982930997Z caller=http.go:194 level=debug traceID=5e8e00ef9b747fe6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.494235ms" 
+ts=2024-05-02T12:17:23.982755448Z caller=http.go:194 level=debug traceID=38c696619025670f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.872012ms" +ts=2024-05-02T12:17:23.982612967Z caller=http.go:194 level=debug traceID=402e34409cc22484 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.318149ms" +ts=2024-05-02T12:17:23.982495847Z caller=http.go:194 level=debug traceID=7f833857748ee68b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.527969ms" +ts=2024-05-02T12:17:23.982008202Z caller=http.go:194 level=debug traceID=49b04e6b2346ed41 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.570791ms" +ts=2024-05-02T12:17:23.980384361Z caller=http.go:194 level=debug traceID=11d77e025d05258b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.232844ms" +ts=2024-05-02T12:17:23.980216547Z caller=http.go:194 level=debug traceID=3cd413a1bb6f131e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.903118ms" +ts=2024-05-02T12:17:23.978529468Z caller=http.go:194 level=debug traceID=3f6161c11bf029e8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.367515ms" +ts=2024-05-02T12:17:23.978543662Z caller=http.go:194 level=debug traceID=6e45d582e77112a5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.061893ms" +ts=2024-05-02T12:17:23.977938219Z caller=http.go:194 level=debug traceID=09e7b429b8b1ece0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.620972ms" +ts=2024-05-02T12:17:23.97702851Z caller=http.go:194 level=debug traceID=47b5dcd0e06d00d9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.41753ms" +ts=2024-05-02T12:17:23.975128895Z caller=http.go:194 level=debug traceID=75ee556d646f2a11 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.195951ms" +ts=2024-05-02T12:17:23.974781075Z caller=http.go:194 level=debug traceID=19d808e0f56c09db orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.736302ms" +ts=2024-05-02T12:17:23.974699827Z caller=http.go:194 level=debug traceID=544bbd0a41494588 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.057196ms" +ts=2024-05-02T12:17:23.9735677Z caller=http.go:194 level=debug traceID=60178a5517fad94f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.311134ms" +ts=2024-05-02T12:17:23.972471581Z caller=http.go:194 level=debug traceID=28adcc5810093539 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.566089ms" +ts=2024-05-02T12:17:23.972197624Z caller=http.go:194 level=debug traceID=379cce12fd4e4f29 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.927717ms" +ts=2024-05-02T12:17:23.972070426Z caller=http.go:194 level=debug traceID=4d8a4d712797fc32 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.715341ms" +ts=2024-05-02T12:17:23.971732565Z caller=http.go:194 level=debug traceID=7f833857748ee68b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.138449ms" +ts=2024-05-02T12:17:23.971744714Z caller=http.go:194 level=debug traceID=175a1fda35309355 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.462226ms" +ts=2024-05-02T12:17:23.971064493Z caller=http.go:194 level=debug traceID=402e34409cc22484 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.886206ms" +ts=2024-05-02T12:17:23.969940525Z caller=http.go:194 level=debug traceID=3cd413a1bb6f131e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.839627ms" +ts=2024-05-02T12:17:23.968550342Z caller=http.go:194 level=debug traceID=00296b49aa3b90f9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.595852ms" +ts=2024-05-02T12:17:23.968417902Z caller=http.go:194 level=debug 
traceID=49b04e6b2346ed41 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.710994ms" +ts=2024-05-02T12:17:23.968348464Z caller=http.go:194 level=debug traceID=3f6161c11bf029e8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.84463ms" +ts=2024-05-02T12:17:23.967921096Z caller=http.go:194 level=debug traceID=2223839516656616 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.000169ms" +ts=2024-05-02T12:17:23.966714589Z caller=http.go:194 level=debug traceID=6e45d582e77112a5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.85812ms" +ts=2024-05-02T12:17:23.966093632Z caller=http.go:194 level=debug traceID=00aaf7e463bec4df orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.745236ms" +ts=2024-05-02T12:17:23.965509871Z caller=http.go:194 level=debug traceID=174b6f03419e48e1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.178468ms" +ts=2024-05-02T12:17:23.964707885Z caller=http.go:194 level=debug traceID=60178a5517fad94f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 20.830782ms" +ts=2024-05-02T12:17:23.962784113Z caller=http.go:194 level=debug traceID=7834247bcfe6c697 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.036297ms" +ts=2024-05-02T12:17:23.962547898Z caller=http.go:194 level=debug traceID=19d808e0f56c09db orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.178067ms" +ts=2024-05-02T12:17:23.962449439Z caller=http.go:194 level=debug traceID=28adcc5810093539 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.57158ms" +ts=2024-05-02T12:17:23.9619494Z caller=http.go:194 level=debug traceID=7c335e5543e3899a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.927366ms" +ts=2024-05-02T12:17:23.961086703Z caller=http.go:194 level=debug traceID=175a1fda35309355 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.719704ms" +ts=2024-05-02T12:17:23.960945313Z caller=http.go:194 level=debug traceID=4d8a4d712797fc32 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.223414ms" +ts=2024-05-02T12:17:23.960757972Z caller=http.go:194 level=debug traceID=02280eceb0cd4350 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.7617ms" +ts=2024-05-02T12:17:23.960101058Z caller=http.go:194 level=debug traceID=3580e7ea21023d72 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.358713ms" +ts=2024-05-02T12:17:23.959097261Z caller=http.go:194 level=debug traceID=522e01a87929f1d2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.629325ms" +ts=2024-05-02T12:17:23.958653479Z caller=http.go:194 level=debug traceID=7fc3074fff11c82c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.524228ms" +ts=2024-05-02T12:17:23.9585304Z caller=http.go:194 level=debug traceID=13298974804aca60 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.707586ms" +ts=2024-05-02T12:17:23.956538142Z caller=http.go:194 level=debug traceID=2223839516656616 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.872765ms" +ts=2024-05-02T12:17:23.955415369Z caller=http.go:194 level=debug traceID=174b6f03419e48e1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.74443ms" +ts=2024-05-02T12:17:23.955120679Z caller=http.go:194 level=debug traceID=00aaf7e463bec4df orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.908726ms" +ts=2024-05-02T12:17:23.954802574Z caller=http.go:194 level=debug traceID=28c1acb26f489432 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 18.214851ms" +ts=2024-05-02T12:17:23.95452138Z caller=http.go:194 level=debug traceID=2b43e6b15bd0677d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
1.385255ms" +ts=2024-05-02T12:17:23.954179064Z caller=http.go:194 level=debug traceID=00296b49aa3b90f9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.513743ms" +ts=2024-05-02T12:17:23.953410985Z caller=http.go:194 level=debug traceID=749dbe3a24ffa8c4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.19261ms" +ts=2024-05-02T12:17:23.952865809Z caller=http.go:194 level=debug traceID=460efc57a2f47525 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.76261ms" +ts=2024-05-02T12:17:23.952492163Z caller=http.go:194 level=debug traceID=5d3dec6398f7b71b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 172.614µs" +ts=2024-05-02T12:17:23.95233461Z caller=http.go:194 level=debug traceID=31efdda304001c4f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.355939ms" +ts=2024-05-02T12:17:23.951829349Z caller=http.go:194 level=debug traceID=7834247bcfe6c697 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.122285ms" +ts=2024-05-02T12:17:23.951271002Z caller=http.go:194 level=debug traceID=2e542d5d05c588a1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.26701ms" +ts=2024-05-02T12:17:23.950125278Z caller=http.go:194 level=debug traceID=7c335e5543e3899a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.596416ms" +ts=2024-05-02T12:17:23.949506021Z caller=http.go:194 level=debug traceID=02280eceb0cd4350 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.940968ms" +ts=2024-05-02T12:17:23.949431873Z caller=http.go:194 level=debug traceID=3580e7ea21023d72 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.900152ms" +ts=2024-05-02T12:17:23.948709201Z caller=http.go:194 level=debug traceID=7fc3074fff11c82c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.974229ms" +ts=2024-05-02T12:17:23.948469574Z caller=http.go:194 level=debug traceID=138f688b31d85641 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.27064ms" +ts=2024-05-02T12:17:23.947698214Z caller=http.go:194 level=debug traceID=0ddc0c6153d27d48 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.879816ms" +ts=2024-05-02T12:17:23.946861956Z caller=http.go:194 level=debug traceID=13298974804aca60 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.921548ms" +ts=2024-05-02T12:17:23.946695458Z caller=http.go:194 level=debug traceID=5a7403c330be0fe3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.061897ms" +ts=2024-05-02T12:17:23.946548389Z caller=http.go:194 level=debug traceID=503c19d115512118 orgID=3648 msg="POST /push.v1.PusherService/Push (400) 103.389µs" +ts=2024-05-02T12:17:23.94630404Z caller=http.go:194 level=debug traceID=22ff211922806d51 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.477024ms" +ts=2024-05-02T12:17:23.944932386Z caller=http.go:194 level=debug traceID=26e52a17bdd91e57 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 244.471µs" +ts=2024-05-02T12:17:23.944392495Z caller=http.go:194 level=debug traceID=6f00b49292ffd98e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.032242ms" +ts=2024-05-02T12:17:23.944399036Z caller=http.go:194 level=debug traceID=1348abf306be8c54 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.964322ms" +ts=2024-05-02T12:17:23.944208402Z caller=http.go:194 level=debug traceID=1f16afad6288ed16 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.236637ms" +ts=2024-05-02T12:17:23.943917323Z caller=http.go:194 level=debug traceID=7f2996cba17c304c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.496343ms" +ts=2024-05-02T12:17:23.943734043Z caller=http.go:194 level=debug 
traceID=522e01a87929f1d2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.832595ms" +ts=2024-05-02T12:17:23.943036372Z caller=http.go:194 level=debug traceID=31efdda304001c4f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.441625ms" +ts=2024-05-02T12:17:23.942919922Z caller=http.go:194 level=debug traceID=2b43e6b15bd0677d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.820834ms" +ts=2024-05-02T12:17:23.942882253Z caller=http.go:194 level=debug traceID=749dbe3a24ffa8c4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.815661ms" +ts=2024-05-02T12:17:23.942681548Z caller=http.go:194 level=debug traceID=713191f50c81e057 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.025838ms" +ts=2024-05-02T12:17:23.942467456Z caller=http.go:194 level=debug traceID=460efc57a2f47525 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.443392ms" +ts=2024-05-02T12:17:23.941782442Z caller=http.go:194 level=debug traceID=5d3dec6398f7b71b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 456.34µs" +ts=2024-05-02T12:17:23.94136481Z caller=http.go:194 level=debug traceID=2e4ca1510528bb8e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.219792ms" +ts=2024-05-02T12:17:23.940842411Z caller=http.go:194 level=debug traceID=3a82e6b1739c51f3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.07753ms" +ts=2024-05-02T12:17:23.940579458Z caller=http.go:194 level=debug traceID=28c1acb26f489432 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.775354ms" +ts=2024-05-02T12:17:23.940323533Z caller=http.go:194 level=debug traceID=2e542d5d05c588a1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.844593ms" +ts=2024-05-02T12:17:23.939616388Z caller=http.go:194 level=debug traceID=7c43d2a612b712d6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.893409ms" +ts=2024-05-02T12:17:23.938568315Z caller=http.go:194 level=debug traceID=7564d4af19e92227 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.573905ms" +ts=2024-05-02T12:17:23.938286971Z caller=http.go:194 level=debug traceID=0663eb3c6354578e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.36725ms" +ts=2024-05-02T12:17:23.9383327Z caller=http.go:194 level=debug traceID=049ed00d76ba01d2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.573697ms" +ts=2024-05-02T12:17:23.937689105Z caller=http.go:194 level=debug traceID=0ddc0c6153d27d48 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.982797ms" +ts=2024-05-02T12:17:23.936900532Z caller=http.go:194 level=debug traceID=503c19d115512118 orgID=1218 msg="POST /push.v1.PusherService/Push (400) 219.313µs" +ts=2024-05-02T12:17:23.936491237Z caller=http.go:194 level=debug traceID=22ff211922806d51 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.235759ms" +ts=2024-05-02T12:17:23.936275651Z caller=http.go:194 level=debug traceID=15b53a13719c59b5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.337309ms" +ts=2024-05-02T12:17:23.935728184Z caller=http.go:194 level=debug traceID=5a7403c330be0fe3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.103584ms" +ts=2024-05-02T12:17:23.935139264Z caller=http.go:194 level=debug traceID=138f688b31d85641 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.503683ms" +ts=2024-05-02T12:17:23.934470485Z caller=http.go:194 level=debug traceID=6f00b49292ffd98e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.213503ms" +ts=2024-05-02T12:17:23.933957364Z caller=http.go:194 level=debug traceID=26e52a17bdd91e57 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
440.036µs" +ts=2024-05-02T12:17:23.933714043Z caller=http.go:194 level=debug traceID=1f16afad6288ed16 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.957808ms" +ts=2024-05-02T12:17:23.93317644Z caller=http.go:194 level=debug traceID=713191f50c81e057 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.692006ms" +ts=2024-05-02T12:17:23.933101059Z caller=http.go:194 level=debug traceID=1348abf306be8c54 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.953106ms" +ts=2024-05-02T12:17:23.933043424Z caller=http.go:194 level=debug traceID=2e5727f490ae3649 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.477103ms" +ts=2024-05-02T12:17:23.932595192Z caller=http.go:194 level=debug traceID=00b57196b1ad6339 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.30364ms" +ts=2024-05-02T12:17:23.932602826Z caller=http.go:194 level=debug traceID=1366b4711ba57530 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 231.425µs" +ts=2024-05-02T12:17:23.932268716Z caller=http.go:194 level=debug traceID=2e4ca1510528bb8e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.41338ms" +ts=2024-05-02T12:17:23.931967962Z caller=http.go:194 level=debug traceID=5c51dd13c7f321a9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.680308ms" +ts=2024-05-02T12:17:23.931904305Z caller=http.go:194 level=debug traceID=7f2996cba17c304c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.960081ms" +ts=2024-05-02T12:17:23.930191731Z caller=http.go:194 level=debug traceID=3a82e6b1739c51f3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.669812ms" +ts=2024-05-02T12:17:23.929311023Z caller=http.go:194 level=debug traceID=5a851d9f3c28e71e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.199666ms" +ts=2024-05-02T12:17:23.929215509Z caller=http.go:194 level=debug traceID=42a67e8541884ed3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.035417ms" +ts=2024-05-02T12:17:23.928910195Z caller=http.go:194 level=debug traceID=049ed00d76ba01d2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.342255ms" +ts=2024-05-02T12:17:23.928332294Z caller=http.go:194 level=debug traceID=7c43d2a612b712d6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.719069ms" +ts=2024-05-02T12:17:23.92716417Z caller=http.go:194 level=debug traceID=7564d4af19e92227 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.206821ms" +ts=2024-05-02T12:17:23.926046262Z caller=http.go:194 level=debug traceID=0663eb3c6354578e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.740917ms" +ts=2024-05-02T12:17:23.925908548Z caller=http.go:194 level=debug traceID=15b53a13719c59b5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.435713ms" +ts=2024-05-02T12:17:23.923161386Z caller=http.go:194 level=debug traceID=413bce442b254157 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.163931ms" +ts=2024-05-02T12:17:23.922154263Z caller=http.go:194 level=debug traceID=2e07625bbe64c648 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.869645ms" +ts=2024-05-02T12:17:23.921664575Z caller=http.go:194 level=debug traceID=0d7df0f3b4620776 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.206729ms" +ts=2024-05-02T12:17:23.921405261Z caller=http.go:194 level=debug traceID=1366b4711ba57530 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 400.955µs" +ts=2024-05-02T12:17:23.921469951Z caller=http.go:194 level=debug traceID=2d93e3da59a109dc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.116455ms" +ts=2024-05-02T12:17:23.921180963Z caller=http.go:194 level=debug 
traceID=03de8891701c3e65 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.097837ms" +ts=2024-05-02T12:17:23.92097835Z caller=http.go:194 level=debug traceID=00b57196b1ad6339 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.213289ms" +ts=2024-05-02T12:17:23.920557613Z caller=http.go:194 level=debug traceID=51cfb824f54ac7b4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.818013ms" +ts=2024-05-02T12:17:23.920590466Z caller=http.go:194 level=debug traceID=5c51dd13c7f321a9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.969143ms" +ts=2024-05-02T12:17:23.920562925Z caller=http.go:194 level=debug traceID=7b71c7d6b70f0957 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.317169ms" +ts=2024-05-02T12:17:23.920567661Z caller=http.go:194 level=debug traceID=2e5727f490ae3649 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.305334ms" +ts=2024-05-02T12:17:23.919405398Z caller=http.go:194 level=debug traceID=42a67e8541884ed3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.570767ms" +ts=2024-05-02T12:17:23.918578846Z caller=http.go:194 level=debug traceID=5a851d9f3c28e71e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.761444ms" +ts=2024-05-02T12:17:23.917930209Z caller=http.go:194 level=debug traceID=67b8b37ff78c93cd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.461256ms" +ts=2024-05-02T12:17:23.91659969Z caller=http.go:194 level=debug traceID=1e50ff9a631dd528 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.527787ms" +ts=2024-05-02T12:17:23.915704106Z caller=http.go:194 level=debug traceID=064e2753398306a3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.061602ms" +ts=2024-05-02T12:17:23.914347018Z caller=http.go:194 level=debug traceID=77d574db79b47015 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.985788ms" +ts=2024-05-02T12:17:23.913928826Z caller=http.go:194 level=debug traceID=32677848d796deac orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.712256ms" +ts=2024-05-02T12:17:23.913789717Z caller=http.go:194 level=debug traceID=03de8891701c3e65 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 25.365255ms" +ts=2024-05-02T12:17:23.912226217Z caller=http.go:194 level=debug traceID=2e07625bbe64c648 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.22314ms" +ts=2024-05-02T12:17:23.912015181Z caller=http.go:194 level=debug traceID=413bce442b254157 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.265861ms" +ts=2024-05-02T12:17:23.911225402Z caller=http.go:194 level=debug traceID=2d93e3da59a109dc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.997802ms" +ts=2024-05-02T12:17:23.910799144Z caller=http.go:194 level=debug traceID=51cfb824f54ac7b4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.221475ms" +ts=2024-05-02T12:17:23.910500649Z caller=http.go:194 level=debug traceID=3c9888863896d618 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 15.18526ms" +ts=2024-05-02T12:17:23.910028241Z caller=http.go:194 level=debug traceID=324c0d7f0fc0ab71 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.06813ms" +ts=2024-05-02T12:17:23.909614465Z caller=http.go:194 level=debug traceID=549f40a3c1909be0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.155478ms" +ts=2024-05-02T12:17:23.909307389Z caller=http.go:194 level=debug traceID=0d7df0f3b4620776 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.402096ms" +ts=2024-05-02T12:17:23.908989085Z caller=http.go:194 level=debug traceID=7b71c7d6b70f0957 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
2.558372ms" +ts=2024-05-02T12:17:23.908121277Z caller=http.go:194 level=debug traceID=67b8b37ff78c93cd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.753867ms" +ts=2024-05-02T12:17:23.90742876Z caller=http.go:194 level=debug traceID=21487fdd5046212e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.501828ms" +ts=2024-05-02T12:17:23.907305718Z caller=http.go:194 level=debug traceID=5d40f42424bacb5e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.013166ms" +ts=2024-05-02T12:17:23.905653823Z caller=http.go:194 level=debug traceID=1e50ff9a631dd528 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.767127ms" +ts=2024-05-02T12:17:23.905639963Z caller=http.go:194 level=debug traceID=02d5f66a4e691283 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.797602ms" +ts=2024-05-02T12:17:23.905244143Z caller=http.go:194 level=debug traceID=42fe972b8abf66d3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.101699ms" +ts=2024-05-02T12:17:23.904873876Z caller=http.go:194 level=debug traceID=13efcb54d5907934 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.853902ms" +ts=2024-05-02T12:17:23.904380908Z caller=http.go:194 level=debug traceID=064e2753398306a3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.206276ms" +ts=2024-05-02T12:17:23.903577132Z caller=http.go:194 level=debug traceID=48d68c89f221563c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.583897ms" +ts=2024-05-02T12:17:23.903429845Z caller=http.go:194 level=debug traceID=77d574db79b47015 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.83498ms" +ts=2024-05-02T12:17:23.902359684Z caller=http.go:194 level=debug traceID=754c4d0ca650d95e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.892422ms" +ts=2024-05-02T12:17:23.90205678Z caller=http.go:194 level=debug traceID=3b7580e616d66e77 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.837658ms" +ts=2024-05-02T12:17:23.901925792Z caller=http.go:194 level=debug traceID=32677848d796deac orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.571774ms" +ts=2024-05-02T12:17:23.901901345Z caller=http.go:194 level=debug traceID=016faafa4cdf3c17 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.564031ms" +ts=2024-05-02T12:17:23.90124073Z caller=http.go:194 level=debug traceID=0b4c27f81b600cf9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.001434ms" +ts=2024-05-02T12:17:23.899196695Z caller=http.go:194 level=debug traceID=7cb37b5c4ef97fb7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.058132ms" +ts=2024-05-02T12:17:23.898869092Z caller=http.go:194 level=debug traceID=549f40a3c1909be0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.715176ms" +ts=2024-05-02T12:17:23.898511331Z caller=http.go:194 level=debug traceID=425ec4f4c9515575 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.40036ms" +ts=2024-05-02T12:17:23.898290226Z caller=http.go:194 level=debug traceID=324c0d7f0fc0ab71 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.044601ms" +ts=2024-05-02T12:17:23.897182432Z caller=http.go:194 level=debug traceID=3c9888863896d618 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.511969ms" +ts=2024-05-02T12:17:23.897195617Z caller=http.go:194 level=debug traceID=4adcb5c0bf794112 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.750985ms" +ts=2024-05-02T12:17:23.895690882Z caller=http.go:194 level=debug traceID=5d40f42424bacb5e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.816154ms" +ts=2024-05-02T12:17:23.895712302Z caller=http.go:194 level=debug 
traceID=21487fdd5046212e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.516716ms" +ts=2024-05-02T12:17:23.895492651Z caller=http.go:194 level=debug traceID=02d5f66a4e691283 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.776501ms" +ts=2024-05-02T12:17:23.894867136Z caller=http.go:194 level=debug traceID=56941405a4d8ba13 orgID=3648 msg="POST /push.v1.PusherService/Push (400) 152.231µs" +ts=2024-05-02T12:17:23.894558278Z caller=http.go:194 level=debug traceID=29398af271133bf3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.319439ms" +ts=2024-05-02T12:17:23.894293039Z caller=http.go:194 level=debug traceID=74664d2203d78627 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.567676ms" +ts=2024-05-02T12:17:23.894047036Z caller=http.go:194 level=debug traceID=13efcb54d5907934 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.380384ms" +ts=2024-05-02T12:17:23.893296795Z caller=http.go:194 level=debug traceID=42fe972b8abf66d3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.048202ms" +ts=2024-05-02T12:17:23.891608095Z caller=http.go:194 level=debug traceID=48d68c89f221563c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.711031ms" +ts=2024-05-02T12:17:23.891178348Z caller=http.go:194 level=debug traceID=016faafa4cdf3c17 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.818063ms" +ts=2024-05-02T12:17:23.890915285Z caller=http.go:194 level=debug traceID=3b7580e616d66e77 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.007775ms" +ts=2024-05-02T12:17:23.890866465Z caller=http.go:194 level=debug traceID=754c4d0ca650d95e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.991796ms" +ts=2024-05-02T12:17:23.889449848Z caller=http.go:194 level=debug traceID=394ad2c1c1afff31 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.535186ms" +ts=2024-05-02T12:17:23.888554988Z caller=http.go:194 level=debug traceID=290d42527e25cf6d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.253534ms" +ts=2024-05-02T12:17:23.888464582Z caller=http.go:194 level=debug traceID=0b4c27f81b600cf9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.876548ms" +ts=2024-05-02T12:17:23.888341881Z caller=http.go:194 level=debug traceID=43d7b7b62b64c1d8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.696831ms" +ts=2024-05-02T12:17:23.887346941Z caller=http.go:194 level=debug traceID=06f8522ff3823665 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.163867ms" +ts=2024-05-02T12:17:23.887314622Z caller=http.go:194 level=debug traceID=425ec4f4c9515575 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.656782ms" +ts=2024-05-02T12:17:23.887020483Z caller=http.go:194 level=debug traceID=7cb37b5c4ef97fb7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.319895ms" +ts=2024-05-02T12:17:23.886371777Z caller=http.go:194 level=debug traceID=4adcb5c0bf794112 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.20099ms" +ts=2024-05-02T12:17:23.885527869Z caller=http.go:194 level=debug traceID=0f44a523e81a7d3b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.138464ms" +ts=2024-05-02T12:17:23.884811813Z caller=http.go:194 level=debug traceID=1cd6b06a19b6b008 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.737159ms" +ts=2024-05-02T12:17:23.883826931Z caller=http.go:194 level=debug traceID=56941405a4d8ba13 orgID=1218 msg="POST /push.v1.PusherService/Push (400) 219.96µs" +ts=2024-05-02T12:17:23.883616128Z caller=http.go:194 level=debug traceID=29398af271133bf3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
1.905568ms" +ts=2024-05-02T12:17:23.882491758Z caller=http.go:194 level=debug traceID=74664d2203d78627 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.336764ms" +ts=2024-05-02T12:17:23.882256967Z caller=http.go:194 level=debug traceID=5b80cf5c7516920d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.62329ms" +ts=2024-05-02T12:17:23.88176637Z caller=http.go:194 level=debug traceID=20c5394903d7c576 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.418477ms" +ts=2024-05-02T12:17:23.88107657Z caller=http.go:194 level=debug traceID=4bec939cad47e0ca orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.387929ms" +ts=2024-05-02T12:17:23.880603932Z caller=http.go:194 level=debug traceID=3e8f90e639a7207a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.188313ms" +ts=2024-05-02T12:17:23.879515602Z caller=http.go:194 level=debug traceID=5c37b8ebc149e005 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 320.422µs" +ts=2024-05-02T12:17:23.879313821Z caller=http.go:194 level=debug traceID=394ad2c1c1afff31 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.307968ms" +ts=2024-05-02T12:17:23.87919823Z caller=http.go:194 level=debug traceID=7f5ae570f26cf7fe orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.458914ms" +ts=2024-05-02T12:17:23.879250646Z caller=http.go:194 level=debug traceID=02923bc212d21610 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.634519ms" +ts=2024-05-02T12:17:23.877358834Z caller=http.go:194 level=debug traceID=290d42527e25cf6d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.330134ms" +ts=2024-05-02T12:17:23.87670722Z caller=http.go:194 level=debug traceID=43d7b7b62b64c1d8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.538137ms" +ts=2024-05-02T12:17:23.875745836Z caller=http.go:194 level=debug traceID=0e2a26c1960c1190 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.196273ms" +ts=2024-05-02T12:17:23.875678822Z caller=http.go:194 level=debug traceID=06f8522ff3823665 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.905829ms" +ts=2024-05-02T12:17:23.875259657Z caller=http.go:194 level=debug traceID=1cd6b06a19b6b008 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.039814ms" +ts=2024-05-02T12:17:23.875007181Z caller=http.go:194 level=debug traceID=0f44a523e81a7d3b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.825553ms" +ts=2024-05-02T12:17:23.874571531Z caller=http.go:194 level=debug traceID=562511dffd737a38 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.725584ms" +ts=2024-05-02T12:17:23.874479997Z caller=http.go:194 level=debug traceID=0f32a5085c60507c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.097712ms" +ts=2024-05-02T12:17:23.87337662Z caller=http.go:194 level=debug traceID=2910cc4972a054cb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 372.37µs" +ts=2024-05-02T12:17:23.873326554Z caller=http.go:194 level=debug traceID=54254e0f081f5f80 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.652897ms" +ts=2024-05-02T12:17:23.873116517Z caller=http.go:194 level=debug traceID=60557d8c6d629355 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.157956ms" +ts=2024-05-02T12:17:23.872042422Z caller=http.go:194 level=debug traceID=3e8f90e639a7207a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.778434ms" +ts=2024-05-02T12:17:23.871844753Z caller=http.go:194 level=debug traceID=20c5394903d7c576 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.650953ms" +ts=2024-05-02T12:17:23.87157042Z caller=http.go:194 level=debug 
traceID=4bec939cad47e0ca orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.304898ms" +ts=2024-05-02T12:17:23.870558093Z caller=http.go:194 level=debug traceID=5b80cf5c7516920d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.524765ms" +ts=2024-05-02T12:17:23.86921398Z caller=http.go:194 level=debug traceID=02923bc212d21610 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.758831ms" +ts=2024-05-02T12:17:23.869218981Z caller=http.go:194 level=debug traceID=1f554d7097455dbc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.666229ms" +ts=2024-05-02T12:17:23.868692239Z caller=http.go:194 level=debug traceID=0acf84fd89ad8364 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.011832ms" +ts=2024-05-02T12:17:23.868242648Z caller=http.go:194 level=debug traceID=5c37b8ebc149e005 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 377.924µs" +ts=2024-05-02T12:17:23.867223443Z caller=http.go:194 level=debug traceID=7f5ae570f26cf7fe orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.859751ms" +ts=2024-05-02T12:17:23.865675122Z caller=http.go:194 level=debug traceID=20d155c4f360957a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.789092ms" +ts=2024-05-02T12:17:23.86493321Z caller=http.go:194 level=debug traceID=0e2a26c1960c1190 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.638116ms" +ts=2024-05-02T12:17:23.864505811Z caller=http.go:194 level=debug traceID=0eb65196e5e73bb2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.406603ms" +ts=2024-05-02T12:17:23.864546468Z caller=http.go:194 level=debug traceID=0f32a5085c60507c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.438403ms" +ts=2024-05-02T12:17:23.864286983Z caller=http.go:194 level=debug traceID=54254e0f081f5f80 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.161094ms" +ts=2024-05-02T12:17:23.864095897Z caller=http.go:194 level=debug traceID=562511dffd737a38 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.252989ms" +ts=2024-05-02T12:17:23.862934305Z caller=http.go:194 level=debug traceID=2910cc4972a054cb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 292.743µs" +ts=2024-05-02T12:17:23.862509076Z caller=http.go:194 level=debug traceID=6b10aaacab888bf6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.258226ms" +ts=2024-05-02T12:17:23.861251735Z caller=http.go:194 level=debug traceID=1f554d7097455dbc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.793334ms" +ts=2024-05-02T12:17:23.860981882Z caller=http.go:194 level=debug traceID=60557d8c6d629355 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.770951ms" +ts=2024-05-02T12:17:23.860931859Z caller=http.go:194 level=debug traceID=0940696310df4c2c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 369.426µs" +ts=2024-05-02T12:17:23.859592722Z caller=http.go:194 level=debug traceID=1aebe98f7a72e57e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.366357ms" +ts=2024-05-02T12:17:23.858154641Z caller=http.go:194 level=debug traceID=4ba7cadee77536fa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.211362ms" +ts=2024-05-02T12:17:23.858127334Z caller=http.go:194 level=debug traceID=4b174eece6586872 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.903997ms" +ts=2024-05-02T12:17:23.856274607Z caller=http.go:194 level=debug traceID=0acf84fd89ad8364 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.543716ms" +ts=2024-05-02T12:17:23.855569027Z caller=http.go:194 level=debug traceID=7df8d99a66e23b78 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
5.211261ms" +ts=2024-05-02T12:17:23.85479295Z caller=http.go:194 level=debug traceID=20d155c4f360957a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.604244ms" +ts=2024-05-02T12:17:23.85385263Z caller=http.go:194 level=debug traceID=0eb65196e5e73bb2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.985962ms" +ts=2024-05-02T12:17:23.853605146Z caller=http.go:194 level=debug traceID=5f5e945cd400eaa7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.014917ms" +ts=2024-05-02T12:17:23.853332786Z caller=http.go:194 level=debug traceID=76726837754128aa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 270.118µs" +ts=2024-05-02T12:17:23.853028511Z caller=http.go:194 level=debug traceID=10324ddedb8ba769 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.737756ms" +ts=2024-05-02T12:17:23.852288931Z caller=http.go:194 level=debug traceID=6b10aaacab888bf6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.445282ms" +ts=2024-05-02T12:17:23.850146928Z caller=http.go:194 level=debug traceID=0940696310df4c2c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 489.197µs" +ts=2024-05-02T12:17:23.849583641Z caller=http.go:194 level=debug traceID=1aebe98f7a72e57e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.587344ms" +ts=2024-05-02T12:17:23.848960108Z caller=http.go:194 level=debug traceID=0bce123691c37176 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.086986ms" +ts=2024-05-02T12:17:23.847699074Z caller=http.go:194 level=debug traceID=4ba7cadee77536fa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.142498ms" +ts=2024-05-02T12:17:23.846747233Z caller=http.go:194 level=debug traceID=4b174eece6586872 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.395452ms" +ts=2024-05-02T12:17:23.846682421Z caller=http.go:194 level=debug traceID=583e16afa1489a3a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 15.00894ms" +ts=2024-05-02T12:17:23.843969034Z caller=http.go:194 level=debug traceID=7df8d99a66e23b78 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.819039ms" +ts=2024-05-02T12:17:23.842068573Z caller=http.go:194 level=debug traceID=76726837754128aa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 322.606µs" +ts=2024-05-02T12:17:23.841772395Z caller=http.go:194 level=debug traceID=5f5e945cd400eaa7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.713014ms" +ts=2024-05-02T12:17:23.840919071Z caller=http.go:194 level=debug traceID=10324ddedb8ba769 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.292471ms" +ts=2024-05-02T12:17:23.841039691Z caller=http.go:194 level=debug traceID=572cecb7f2032eb2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.707297ms" +ts=2024-05-02T12:17:23.840678567Z caller=http.go:194 level=debug traceID=779836f018fc7c96 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.72924ms" +ts=2024-05-02T12:17:23.839397424Z caller=http.go:194 level=debug traceID=3397c04d859e892f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.594737ms" +ts=2024-05-02T12:17:23.838574416Z caller=http.go:194 level=debug traceID=0bce123691c37176 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.11999ms" +ts=2024-05-02T12:17:23.834851681Z caller=http.go:194 level=debug traceID=5667062aad981565 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.329715ms" +ts=2024-05-02T12:17:23.833873546Z caller=http.go:194 level=debug traceID=055705f8e7b160b7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.63414ms" +ts=2024-05-02T12:17:23.833342Z caller=http.go:194 level=debug 
traceID=7cf2c3eb415cf43d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.892482ms" +ts=2024-05-02T12:17:23.832719434Z caller=http.go:194 level=debug traceID=583e16afa1489a3a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.049298ms" +ts=2024-05-02T12:17:23.831653309Z caller=http.go:194 level=debug traceID=572cecb7f2032eb2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.415258ms" +ts=2024-05-02T12:17:23.830804576Z caller=http.go:194 level=debug traceID=779836f018fc7c96 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.740241ms" +ts=2024-05-02T12:17:23.829594214Z caller=http.go:194 level=debug traceID=3397c04d859e892f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.131994ms" +ts=2024-05-02T12:17:23.828271348Z caller=http.go:194 level=debug traceID=110329cbb661b36b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.664867ms" +ts=2024-05-02T12:17:23.828227755Z caller=http.go:194 level=debug traceID=696805d0ecd62ea4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.273488ms" +ts=2024-05-02T12:17:23.827941889Z caller=http.go:194 level=debug traceID=0bdbbfad0a508612 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.574212ms" +ts=2024-05-02T12:17:23.827445946Z caller=http.go:194 level=debug traceID=0961dc9afd3c873f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.80366ms" +ts=2024-05-02T12:17:23.827354829Z caller=http.go:194 level=debug traceID=6063948921dde7f9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.041261ms" +ts=2024-05-02T12:17:23.826014276Z caller=http.go:194 level=debug traceID=745b45fc588676cd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.490844ms" +ts=2024-05-02T12:17:23.824318966Z caller=http.go:194 level=debug traceID=5667062aad981565 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.630071ms" +ts=2024-05-02T12:17:23.823047551Z caller=http.go:194 level=debug traceID=7cf2c3eb415cf43d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.785355ms" +ts=2024-05-02T12:17:23.822516725Z caller=http.go:194 level=debug traceID=055705f8e7b160b7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.73891ms" +ts=2024-05-02T12:17:23.820248614Z caller=http.go:194 level=debug traceID=19f810a928473a1f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.174695ms" +ts=2024-05-02T12:17:23.819758188Z caller=http.go:194 level=debug traceID=251879656de2c383 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.986837ms" +ts=2024-05-02T12:17:23.819660085Z caller=http.go:194 level=debug traceID=04a17c2fbde8f176 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.569797ms" +ts=2024-05-02T12:17:23.81831841Z caller=http.go:194 level=debug traceID=110329cbb661b36b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.108939ms" +ts=2024-05-02T12:17:23.817863141Z caller=http.go:194 level=debug traceID=00299bf86d73eea2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.603668ms" +ts=2024-05-02T12:17:23.817440137Z caller=http.go:194 level=debug traceID=11a5e847cf034656 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.060713ms" +ts=2024-05-02T12:17:23.817503325Z caller=http.go:194 level=debug traceID=696805d0ecd62ea4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.515625ms" +ts=2024-05-02T12:17:23.817365237Z caller=http.go:194 level=debug traceID=0961dc9afd3c873f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.873641ms" +ts=2024-05-02T12:17:23.817250697Z caller=http.go:194 level=debug traceID=2cfe85ad901a5abc orgID=3648 msg="POST /push.v1.PusherService/Push 
(200) 3.934111ms" +ts=2024-05-02T12:17:23.816239141Z caller=http.go:194 level=debug traceID=0bdbbfad0a508612 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.147817ms" +ts=2024-05-02T12:17:23.815190695Z caller=http.go:194 level=debug traceID=6063948921dde7f9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.644871ms" +ts=2024-05-02T12:17:23.815084668Z caller=http.go:194 level=debug traceID=745b45fc588676cd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.882012ms" +ts=2024-05-02T12:17:23.8141806Z caller=http.go:194 level=debug traceID=267235924d145dbc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.753083ms" +ts=2024-05-02T12:17:23.814198548Z caller=http.go:194 level=debug traceID=6d4e877248ac4c85 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.137406ms" +ts=2024-05-02T12:17:23.812337737Z caller=http.go:194 level=debug traceID=7d795fb262ecda53 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.292362ms" +ts=2024-05-02T12:17:23.81174313Z caller=http.go:194 level=debug traceID=14caca8fe0cd090a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.833732ms" +ts=2024-05-02T12:17:23.811725878Z caller=http.go:194 level=debug traceID=3ed6ee1616ccc91b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.041014ms" +ts=2024-05-02T12:17:23.811285335Z caller=http.go:194 level=debug traceID=33faab9ede4775cd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.232088ms" +ts=2024-05-02T12:17:23.810175536Z caller=http.go:194 level=debug traceID=57d8021368a04ed4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.630464ms" +ts=2024-05-02T12:17:23.809121934Z caller=http.go:194 level=debug traceID=251879656de2c383 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.503085ms" +ts=2024-05-02T12:17:23.80886986Z caller=http.go:194 level=debug traceID=518677f4d65719d0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.277895ms" +ts=2024-05-02T12:17:23.80849178Z caller=http.go:194 level=debug traceID=04a17c2fbde8f176 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.4814ms" +ts=2024-05-02T12:17:23.807699209Z caller=http.go:194 level=debug traceID=19f810a928473a1f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.825111ms" +ts=2024-05-02T12:17:23.806120475Z caller=http.go:194 level=debug traceID=00299bf86d73eea2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.155146ms" +ts=2024-05-02T12:17:23.806091534Z caller=http.go:194 level=debug traceID=11a5e847cf034656 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.087839ms" +ts=2024-05-02T12:17:23.805926857Z caller=http.go:194 level=debug traceID=281563291f26b41b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.36112ms" +ts=2024-05-02T12:17:23.805383376Z caller=http.go:194 level=debug traceID=2cfe85ad901a5abc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.31694ms" +ts=2024-05-02T12:17:23.804061867Z caller=http.go:194 level=debug traceID=4371c370d968fd9e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.494636ms" +ts=2024-05-02T12:17:23.803656528Z caller=http.go:194 level=debug traceID=5941040ae58ae5f6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 654.073µs" +ts=2024-05-02T12:17:23.803063402Z caller=http.go:194 level=debug traceID=05a113f5609e5a87 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.28598ms" +ts=2024-05-02T12:17:23.802866705Z caller=http.go:194 level=debug traceID=267235924d145dbc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.920417ms" +ts=2024-05-02T12:17:23.801587217Z caller=http.go:194 level=debug 
traceID=6d4e877248ac4c85 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.401764ms" +ts=2024-05-02T12:17:23.801549836Z caller=http.go:194 level=debug traceID=3ed6ee1616ccc91b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.813274ms" +ts=2024-05-02T12:17:23.801257591Z caller=http.go:194 level=debug traceID=14caca8fe0cd090a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.492716ms" +ts=2024-05-02T12:17:23.801085007Z caller=http.go:194 level=debug traceID=717aa80aa7cdc5d1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.99032ms" +ts=2024-05-02T12:17:23.800351415Z caller=http.go:194 level=debug traceID=7d795fb262ecda53 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.591069ms" +ts=2024-05-02T12:17:23.800344815Z caller=http.go:194 level=debug traceID=72c4545e05e7e6cd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.664591ms" +ts=2024-05-02T12:17:23.798912463Z caller=http.go:194 level=debug traceID=33faab9ede4775cd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.774837ms" +ts=2024-05-02T12:17:23.798703247Z caller=http.go:194 level=debug traceID=57d8021368a04ed4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.570732ms" +ts=2024-05-02T12:17:23.798458372Z caller=http.go:194 level=debug traceID=5ecfbc28b0f91761 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.541786ms" +ts=2024-05-02T12:17:23.797770661Z caller=http.go:194 level=debug traceID=2c1299b29fcb6da6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.338006ms" +ts=2024-05-02T12:17:23.797214808Z caller=http.go:194 level=debug traceID=518677f4d65719d0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.351685ms" +ts=2024-05-02T12:17:23.795402488Z caller=http.go:194 level=debug traceID=1cba5bf357ab0f6e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.641994ms" +ts=2024-05-02T12:17:23.795130258Z caller=http.go:194 level=debug traceID=1309de0a12100d8c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.556849ms" +ts=2024-05-02T12:17:23.79503362Z caller=http.go:194 level=debug traceID=5d68b14b28bd9c61 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.128979ms" +ts=2024-05-02T12:17:23.793638605Z caller=http.go:194 level=debug traceID=281563291f26b41b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.724698ms" +ts=2024-05-02T12:17:23.793387396Z caller=http.go:194 level=debug traceID=1ae7b8e12d3e29f5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.674813ms" +ts=2024-05-02T12:17:23.792828764Z caller=http.go:194 level=debug traceID=5941040ae58ae5f6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 566.772µs" +ts=2024-05-02T12:17:23.792635007Z caller=http.go:194 level=debug traceID=4371c370d968fd9e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.795959ms" +ts=2024-05-02T12:17:23.79246663Z caller=http.go:194 level=debug traceID=05a113f5609e5a87 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.304298ms" +ts=2024-05-02T12:17:23.791553507Z caller=http.go:194 level=debug traceID=658bc5501ae9536f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.709915ms" +ts=2024-05-02T12:17:23.791411326Z caller=http.go:194 level=debug traceID=72c4545e05e7e6cd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.675764ms" +ts=2024-05-02T12:17:23.790533648Z caller=http.go:194 level=debug traceID=717aa80aa7cdc5d1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.730554ms" +ts=2024-05-02T12:17:23.788770569Z caller=http.go:194 level=debug traceID=0f8b4489f819b839 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
3.049721ms" +ts=2024-05-02T12:17:23.787877507Z caller=http.go:194 level=debug traceID=5ecfbc28b0f91761 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.940483ms" +ts=2024-05-02T12:17:23.786839258Z caller=http.go:194 level=debug traceID=3eab33a00493e747 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.948875ms" +ts=2024-05-02T12:17:23.786293535Z caller=http.go:194 level=debug traceID=1cba5bf357ab0f6e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.861304ms" +ts=2024-05-02T12:17:23.785429278Z caller=http.go:194 level=debug traceID=74c0f34b938be87b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.65056ms" +ts=2024-05-02T12:17:23.784967778Z caller=http.go:194 level=debug traceID=2b62675cd759547a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.571735ms" +ts=2024-05-02T12:17:23.784221794Z caller=http.go:194 level=debug traceID=2c1299b29fcb6da6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.087662ms" +ts=2024-05-02T12:17:23.784224451Z caller=http.go:194 level=debug traceID=1309de0a12100d8c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.952587ms" +ts=2024-05-02T12:17:23.783354056Z caller=http.go:194 level=debug traceID=21c98fffec857706 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.3047ms" +ts=2024-05-02T12:17:23.783321056Z caller=http.go:194 level=debug traceID=1ae7b8e12d3e29f5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.047137ms" +ts=2024-05-02T12:17:23.782927009Z caller=http.go:194 level=debug traceID=5d68b14b28bd9c61 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.901688ms" +ts=2024-05-02T12:17:23.782383513Z caller=http.go:194 level=debug traceID=658bc5501ae9536f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.371716ms" +ts=2024-05-02T12:17:23.780180763Z caller=http.go:194 level=debug traceID=45aaac0549d97b20 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.958213ms" +ts=2024-05-02T12:17:23.779223209Z caller=http.go:194 level=debug traceID=2dcf8fc96e39d11c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.846351ms" +ts=2024-05-02T12:17:23.779130119Z caller=http.go:194 level=debug traceID=1ef280c8808fb790 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.682819ms" +ts=2024-05-02T12:17:23.777753626Z caller=http.go:194 level=debug traceID=0f8b4489f819b839 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.590214ms" +ts=2024-05-02T12:17:23.777811019Z caller=http.go:194 level=debug traceID=242c8a129992c42c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.859076ms" +ts=2024-05-02T12:17:23.777430775Z caller=http.go:194 level=debug traceID=333cb92402968388 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.927727ms" +ts=2024-05-02T12:17:23.776669048Z caller=http.go:194 level=debug traceID=4cc2dfaaeb409224 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.898309ms" +ts=2024-05-02T12:17:23.776357765Z caller=http.go:194 level=debug traceID=3d8af856029e2360 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.203481ms" +ts=2024-05-02T12:17:23.775324421Z caller=http.go:194 level=debug traceID=3eab33a00493e747 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.287314ms" +ts=2024-05-02T12:17:23.774679149Z caller=http.go:194 level=debug traceID=2b62675cd759547a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.537593ms" +ts=2024-05-02T12:17:23.774525095Z caller=http.go:194 level=debug traceID=74c0f34b938be87b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.038034ms" +ts=2024-05-02T12:17:23.772575171Z caller=http.go:194 level=debug 
traceID=5032710714c5e9f2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.521118ms" +ts=2024-05-02T12:17:23.772067544Z caller=http.go:194 level=debug traceID=21c98fffec857706 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.762823ms" +ts=2024-05-02T12:17:23.771863591Z caller=http.go:194 level=debug traceID=4f619cb4606afcc4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.355763ms" +ts=2024-05-02T12:17:23.77112001Z caller=http.go:194 level=debug traceID=1a947a39bd5af05b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.795571ms" +ts=2024-05-02T12:17:23.770918759Z caller=http.go:194 level=debug traceID=00b0d27ac5be8f0c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.163689ms" +ts=2024-05-02T12:17:23.769802452Z caller=http.go:194 level=debug traceID=172fd7481714d48a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.490958ms" +ts=2024-05-02T12:17:23.768976177Z caller=http.go:194 level=debug traceID=3d8af856029e2360 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.292318ms" +ts=2024-05-02T12:17:23.769046723Z caller=http.go:194 level=debug traceID=45aaac0549d97b20 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.53445ms" +ts=2024-05-02T12:17:23.767740775Z caller=http.go:194 level=debug traceID=1ef280c8808fb790 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.002457ms" +ts=2024-05-02T12:17:23.767473039Z caller=http.go:194 level=debug traceID=242c8a129992c42c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.657172ms" +ts=2024-05-02T12:17:23.76720073Z caller=http.go:194 level=debug traceID=2dcf8fc96e39d11c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.543813ms" +ts=2024-05-02T12:17:23.766751747Z caller=http.go:194 level=debug traceID=333cb92402968388 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.503903ms" +ts=2024-05-02T12:17:23.76613446Z caller=http.go:194 level=debug traceID=7977c9506d320a8f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.891786ms" +ts=2024-05-02T12:17:23.76607823Z caller=http.go:194 level=debug traceID=6ddc958710348ca0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.39012ms" +ts=2024-05-02T12:17:23.76534181Z caller=http.go:194 level=debug traceID=4cc2dfaaeb409224 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.372795ms" +ts=2024-05-02T12:17:23.764885328Z caller=http.go:194 level=debug traceID=33e1fd3f83c17b5a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.516481ms" +ts=2024-05-02T12:17:23.764629715Z caller=http.go:194 level=debug traceID=2b0f33fd3d83b51f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.417219ms" +ts=2024-05-02T12:17:23.762767343Z caller=http.go:194 level=debug traceID=3f9af0d4ec143716 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.789063ms" +ts=2024-05-02T12:17:23.762268863Z caller=http.go:194 level=debug traceID=34419a7058773172 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.562844ms" +ts=2024-05-02T12:17:23.762103994Z caller=http.go:194 level=debug traceID=4e48ad92f27a5cfd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.412329ms" +ts=2024-05-02T12:17:23.761573407Z caller=http.go:194 level=debug traceID=00b0d27ac5be8f0c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.74568ms" +ts=2024-05-02T12:17:23.76116242Z caller=http.go:194 level=debug traceID=77ec24a768f4a668 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.05174ms" +ts=2024-05-02T12:17:23.760587568Z caller=http.go:194 level=debug traceID=1a947a39bd5af05b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
2.393438ms" +ts=2024-05-02T12:17:23.760137979Z caller=http.go:194 level=debug traceID=172fd7481714d48a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.78908ms" +ts=2024-05-02T12:17:23.759870409Z caller=http.go:194 level=debug traceID=4f619cb4606afcc4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.78984ms" +ts=2024-05-02T12:17:23.759826917Z caller=http.go:194 level=debug traceID=5032710714c5e9f2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.044864ms" +ts=2024-05-02T12:17:23.759771835Z caller=http.go:194 level=debug traceID=67e5eab35ad30fc3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.448828ms" +ts=2024-05-02T12:17:23.758917198Z caller=http.go:194 level=debug traceID=09191d0b91f72c41 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 273.042µs" +ts=2024-05-02T12:17:23.75879499Z caller=http.go:194 level=debug traceID=4b419e44c35d6ad7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.860632ms" +ts=2024-05-02T12:17:23.758279297Z caller=http.go:194 level=debug traceID=46d8c39691d5acf4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.260928ms" +ts=2024-05-02T12:17:23.758179081Z caller=http.go:194 level=debug traceID=66c08399d24ba908 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.507437ms" +ts=2024-05-02T12:17:23.757723061Z caller=http.go:194 level=debug traceID=19594d465871f7b6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.510774ms" +ts=2024-05-02T12:17:23.755823756Z caller=http.go:194 level=debug traceID=62e88ea352d020b7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.42443ms" +ts=2024-05-02T12:17:23.75581968Z caller=http.go:194 level=debug traceID=6ddc958710348ca0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.776162ms" +ts=2024-05-02T12:17:23.754932415Z caller=http.go:194 level=debug traceID=478783870fa3cb5b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.848501ms" +ts=2024-05-02T12:17:23.75474922Z caller=http.go:194 level=debug traceID=0b9edeef86e30d1c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.964883ms" +ts=2024-05-02T12:17:23.754547039Z caller=http.go:194 level=debug traceID=7977c9506d320a8f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.604083ms" +ts=2024-05-02T12:17:23.754320481Z caller=http.go:194 level=debug traceID=09ce847b4d99969d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.181589ms" +ts=2024-05-02T12:17:23.754223098Z caller=http.go:194 level=debug traceID=6bb49450606568bd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.065363ms" +ts=2024-05-02T12:17:23.75399428Z caller=http.go:194 level=debug traceID=26beb7d5eaedce35 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.337355ms" +ts=2024-05-02T12:17:23.753088812Z caller=http.go:194 level=debug traceID=3f9af0d4ec143716 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.565342ms" +ts=2024-05-02T12:17:23.753062938Z caller=http.go:194 level=debug traceID=2b0f33fd3d83b51f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.281375ms" +ts=2024-05-02T12:17:23.752935139Z caller=http.go:194 level=debug traceID=33e1fd3f83c17b5a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.15008ms" +ts=2024-05-02T12:17:23.752324848Z caller=http.go:194 level=debug traceID=4e48ad92f27a5cfd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.60905ms" +ts=2024-05-02T12:17:23.752081219Z caller=http.go:194 level=debug traceID=34419a7058773172 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.41077ms" +ts=2024-05-02T12:17:23.751908681Z caller=http.go:194 level=debug 
traceID=1a1b34a83daae184 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.679271ms" +ts=2024-05-02T12:17:23.750933056Z caller=http.go:194 level=debug traceID=43681e372ef78d8e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.847308ms" +ts=2024-05-02T12:17:23.750528172Z caller=http.go:194 level=debug traceID=77ec24a768f4a668 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.514101ms" +ts=2024-05-02T12:17:23.750424281Z caller=http.go:194 level=debug traceID=4b419e44c35d6ad7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.836443ms" +ts=2024-05-02T12:17:23.750174848Z caller=http.go:194 level=debug traceID=5242039fd3af98cb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.884416ms" +ts=2024-05-02T12:17:23.749912291Z caller=http.go:194 level=debug traceID=67e5eab35ad30fc3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.790433ms" +ts=2024-05-02T12:17:23.749660573Z caller=http.go:194 level=debug traceID=6b1e097ef56ba9c0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.046395ms" +ts=2024-05-02T12:17:23.749370393Z caller=http.go:194 level=debug traceID=58a08cc549d2ab7d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.643374ms" +ts=2024-05-02T12:17:23.748892567Z caller=http.go:194 level=debug traceID=46d8c39691d5acf4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.715976ms" +ts=2024-05-02T12:17:23.748302247Z caller=http.go:194 level=debug traceID=1eb95c66098d43ea orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.983614ms" +ts=2024-05-02T12:17:23.748022345Z caller=http.go:194 level=debug traceID=09191d0b91f72c41 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 495.101µs" +ts=2024-05-02T12:17:23.748070952Z caller=http.go:194 level=debug traceID=0ad12c243d15cbdf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.85337ms" +ts=2024-05-02T12:17:23.74774561Z caller=http.go:194 level=debug traceID=0df053b850701864 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.665632ms" +ts=2024-05-02T12:17:23.747666801Z caller=http.go:194 level=debug traceID=6906c91f34aae015 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.603059ms" +ts=2024-05-02T12:17:23.746317863Z caller=http.go:194 level=debug traceID=19594d465871f7b6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.732673ms" +ts=2024-05-02T12:17:23.746264691Z caller=http.go:194 level=debug traceID=66c08399d24ba908 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.130705ms" +ts=2024-05-02T12:17:23.745433713Z caller=http.go:194 level=debug traceID=1d309fc00e0842f8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.069958ms" +ts=2024-05-02T12:17:23.745358879Z caller=http.go:194 level=debug traceID=6f557070caec3be2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.758241ms" +ts=2024-05-02T12:17:23.74505421Z caller=http.go:194 level=debug traceID=62e88ea352d020b7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.056866ms" +ts=2024-05-02T12:17:23.744744982Z caller=http.go:194 level=debug traceID=0b9edeef86e30d1c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.04463ms" +ts=2024-05-02T12:17:23.744379976Z caller=http.go:194 level=debug traceID=0b2922efac829cb2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.713445ms" +ts=2024-05-02T12:17:23.74441301Z caller=http.go:194 level=debug traceID=478783870fa3cb5b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.402908ms" +ts=2024-05-02T12:17:23.74421942Z caller=http.go:194 level=debug traceID=6bb49450606568bd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
2.362617ms" +ts=2024-05-02T12:17:23.744118434Z caller=http.go:194 level=debug traceID=09ce847b4d99969d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.390781ms" +ts=2024-05-02T12:17:23.743061205Z caller=http.go:194 level=debug traceID=117b5e677a9c1d3d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.846744ms" +ts=2024-05-02T12:17:23.743108995Z caller=http.go:194 level=debug traceID=7e197f8cee981c5f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.674176ms" +ts=2024-05-02T12:17:23.743043831Z caller=http.go:194 level=debug traceID=3dcba1d5f779c2f5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.832393ms" +ts=2024-05-02T12:17:23.742991833Z caller=http.go:194 level=debug traceID=348635ec7fc4b7ee orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.312913ms" +ts=2024-05-02T12:17:23.742250857Z caller=http.go:194 level=debug traceID=47beb838a8be0417 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.758819ms" +ts=2024-05-02T12:17:23.741981397Z caller=http.go:194 level=debug traceID=26beb7d5eaedce35 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.900041ms" +ts=2024-05-02T12:17:23.740978213Z caller=http.go:194 level=debug traceID=408bb0e8853eca99 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.155818ms" +ts=2024-05-02T12:17:23.740945359Z caller=http.go:194 level=debug traceID=1eb95c66098d43ea orgID=1218 msg="POST /push.v1.PusherService/Push (200) 18.44061ms" +ts=2024-05-02T12:17:23.74081304Z caller=http.go:194 level=debug traceID=1a1b34a83daae184 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.703821ms" +ts=2024-05-02T12:17:23.740124542Z caller=http.go:194 level=debug traceID=6b1e097ef56ba9c0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.294731ms" +ts=2024-05-02T12:17:23.739728049Z caller=http.go:194 level=debug traceID=490e1c2af231709a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.204062ms" +ts=2024-05-02T12:17:23.739362746Z caller=http.go:194 level=debug traceID=0df053b850701864 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.325607ms" +ts=2024-05-02T12:17:23.739440436Z caller=http.go:194 level=debug traceID=43681e372ef78d8e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.165871ms" +ts=2024-05-02T12:17:23.739324556Z caller=http.go:194 level=debug traceID=5242039fd3af98cb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.964961ms" +ts=2024-05-02T12:17:23.73904101Z caller=http.go:194 level=debug traceID=589ee08155bbc894 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.745095ms" +ts=2024-05-02T12:17:23.737606885Z caller=http.go:194 level=debug traceID=6906c91f34aae015 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.560596ms" +ts=2024-05-02T12:17:23.737073099Z caller=http.go:194 level=debug traceID=464bc3cef697f71c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.964066ms" +ts=2024-05-02T12:17:23.736955408Z caller=http.go:194 level=debug traceID=58a08cc549d2ab7d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.053863ms" +ts=2024-05-02T12:17:23.736909675Z caller=http.go:194 level=debug traceID=0ad12c243d15cbdf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.803956ms" +ts=2024-05-02T12:17:23.73633899Z caller=http.go:194 level=debug traceID=6f557070caec3be2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.979322ms" +ts=2024-05-02T12:17:23.735508455Z caller=http.go:194 level=debug traceID=56cf3b8bf3f71f46 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.750115ms" +ts=2024-05-02T12:17:23.73554493Z caller=http.go:194 level=debug 
traceID=491b919b86698d2f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.397444ms" +ts=2024-05-02T12:17:23.7346931Z caller=http.go:194 level=debug traceID=1d309fc00e0842f8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.784412ms" +ts=2024-05-02T12:17:23.734436557Z caller=http.go:194 level=debug traceID=4ec07ddbe73ed39e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.458829ms" +ts=2024-05-02T12:17:23.734306671Z caller=http.go:194 level=debug traceID=0c7b378f18eacd7d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.956986ms" +ts=2024-05-02T12:17:23.732885771Z caller=http.go:194 level=debug traceID=0b2922efac829cb2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.474965ms" +ts=2024-05-02T12:17:23.732108998Z caller=http.go:194 level=debug traceID=47beb838a8be0417 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.667511ms" +ts=2024-05-02T12:17:23.73209408Z caller=http.go:194 level=debug traceID=69034b50620dd549 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.424544ms" +ts=2024-05-02T12:17:23.732004006Z caller=http.go:194 level=debug traceID=348635ec7fc4b7ee orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.484785ms" +ts=2024-05-02T12:17:23.731781678Z caller=http.go:194 level=debug traceID=7e197f8cee981c5f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.703134ms" +ts=2024-05-02T12:17:23.731728022Z caller=http.go:194 level=debug traceID=3dcba1d5f779c2f5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.477921ms" +ts=2024-05-02T12:17:23.731620353Z caller=http.go:194 level=debug traceID=29ee9b0566c5c872 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.63069ms" +ts=2024-05-02T12:17:23.731308164Z caller=http.go:194 level=debug traceID=6db47d5027648c90 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.377854ms" +ts=2024-05-02T12:17:23.730681093Z caller=http.go:194 level=debug traceID=76708a0eb8eb7c88 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.958347ms" +ts=2024-05-02T12:17:23.730506484Z caller=http.go:194 level=debug traceID=117b5e677a9c1d3d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.373232ms" +ts=2024-05-02T12:17:23.730081208Z caller=http.go:194 level=debug traceID=2c5209d7dec5190e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.15651ms" +ts=2024-05-02T12:17:23.728029749Z caller=http.go:194 level=debug traceID=589ee08155bbc894 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.147641ms" +ts=2024-05-02T12:17:23.727747491Z caller=http.go:194 level=debug traceID=408bb0e8853eca99 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.71405ms" +ts=2024-05-02T12:17:23.72759552Z caller=http.go:194 level=debug traceID=490e1c2af231709a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.599492ms" +ts=2024-05-02T12:17:23.7272397Z caller=http.go:194 level=debug traceID=3eabeb81abb378ab orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.972384ms" +ts=2024-05-02T12:17:23.725447017Z caller=http.go:194 level=debug traceID=464bc3cef697f71c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.966193ms" +ts=2024-05-02T12:17:23.724753735Z caller=http.go:194 level=debug traceID=491b919b86698d2f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.51246ms" +ts=2024-05-02T12:17:23.724692209Z caller=http.go:194 level=debug traceID=03bdc4935278698b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.198584ms" +ts=2024-05-02T12:17:23.724549155Z caller=http.go:194 level=debug traceID=36f10ded5049870e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
2.641213ms" +ts=2024-05-02T12:17:23.724413326Z caller=http.go:194 level=debug traceID=0c7b378f18eacd7d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.279785ms" +ts=2024-05-02T12:17:23.724471451Z caller=http.go:194 level=debug traceID=518733f60b543796 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.849167ms" +ts=2024-05-02T12:17:23.724069636Z caller=http.go:194 level=debug traceID=56cf3b8bf3f71f46 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.590422ms" +ts=2024-05-02T12:17:23.724039052Z caller=http.go:194 level=debug traceID=4ec07ddbe73ed39e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.04741ms" +ts=2024-05-02T12:17:23.723948275Z caller=http.go:194 level=debug traceID=7901f095aa5d8626 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.53573ms" +ts=2024-05-02T12:17:23.723606065Z caller=http.go:194 level=debug traceID=1c901ebdf2c92c50 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.100021ms" +ts=2024-05-02T12:17:23.721832842Z caller=http.go:194 level=debug traceID=2e2b6662f31561b9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.159548ms" +ts=2024-05-02T12:17:23.721732Z caller=http.go:194 level=debug traceID=79916df1ddada287 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.170925ms" +ts=2024-05-02T12:17:23.720920272Z caller=http.go:194 level=debug traceID=29ee9b0566c5c872 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.063234ms" +ts=2024-05-02T12:17:23.720880305Z caller=http.go:194 level=debug traceID=69034b50620dd549 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.504093ms" +ts=2024-05-02T12:17:23.720531588Z caller=http.go:194 level=debug traceID=2d1f8931a601c6b4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.598763ms" +ts=2024-05-02T12:17:23.719806643Z caller=http.go:194 level=debug traceID=2c5209d7dec5190e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.287355ms" +ts=2024-05-02T12:17:23.719293465Z caller=http.go:194 level=debug traceID=76708a0eb8eb7c88 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.056485ms" +ts=2024-05-02T12:17:23.71903491Z caller=http.go:194 level=debug traceID=6db47d5027648c90 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.72956ms" +ts=2024-05-02T12:17:23.71874873Z caller=http.go:194 level=debug traceID=7894894c5dfbd78e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.050604ms" +ts=2024-05-02T12:17:23.718079219Z caller=http.go:194 level=debug traceID=26a6162409c510aa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.727066ms" +ts=2024-05-02T12:17:23.71788273Z caller=http.go:194 level=debug traceID=024b200b166c13f8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.366872ms" +ts=2024-05-02T12:17:23.71759986Z caller=http.go:194 level=debug traceID=182a03260e0aec57 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.872924ms" +ts=2024-05-02T12:17:23.716711869Z caller=http.go:194 level=debug traceID=3eabeb81abb378ab orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.710526ms" +ts=2024-05-02T12:17:23.716245479Z caller=http.go:194 level=debug traceID=6085aaec0f02c202 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.405353ms" +ts=2024-05-02T12:17:23.715881748Z caller=http.go:194 level=debug traceID=7fe3ca512f45389d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.488466ms" +ts=2024-05-02T12:17:23.714017261Z caller=http.go:194 level=debug traceID=7901f095aa5d8626 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.086037ms" +ts=2024-05-02T12:17:23.713691163Z caller=http.go:194 level=debug 
traceID=518733f60b543796 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.070745ms" +ts=2024-05-02T12:17:23.713413816Z caller=http.go:194 level=debug traceID=1c901ebdf2c92c50 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.44491ms" +ts=2024-05-02T12:17:23.713306637Z caller=http.go:194 level=debug traceID=36f10ded5049870e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.873553ms" +ts=2024-05-02T12:17:23.713060226Z caller=http.go:194 level=debug traceID=1bf074971a465a28 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.38377ms" +ts=2024-05-02T12:17:23.71241372Z caller=http.go:194 level=debug traceID=03bdc4935278698b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.850351ms" +ts=2024-05-02T12:17:23.711962362Z caller=http.go:194 level=debug traceID=0812cf4c3c5662ad orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.624003ms" +ts=2024-05-02T12:17:23.711565524Z caller=http.go:194 level=debug traceID=2d1f8931a601c6b4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.610785ms" +ts=2024-05-02T12:17:23.711580977Z caller=http.go:194 level=debug traceID=48469ae3f50cc54f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.533099ms" +ts=2024-05-02T12:17:23.711319891Z caller=http.go:194 level=debug traceID=60245fcb497cbfda orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.907327ms" +ts=2024-05-02T12:17:23.710657696Z caller=http.go:194 level=debug traceID=37629031aca2d451 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.816316ms" +ts=2024-05-02T12:17:23.71042272Z caller=http.go:194 level=debug traceID=2e2b6662f31561b9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.212562ms" +ts=2024-05-02T12:17:23.709760788Z caller=http.go:194 level=debug traceID=79916df1ddada287 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.548848ms" +ts=2024-05-02T12:17:23.709558019Z caller=http.go:194 level=debug traceID=19c10e4f2fb1a9b6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.222528ms" +ts=2024-05-02T12:17:23.709060999Z caller=http.go:194 level=debug traceID=58db2b0d85d2d0bc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.147578ms" +ts=2024-05-02T12:17:23.707850228Z caller=http.go:194 level=debug traceID=26a6162409c510aa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.885599ms" +ts=2024-05-02T12:17:23.707097095Z caller=http.go:194 level=debug traceID=024b200b166c13f8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.623986ms" +ts=2024-05-02T12:17:23.706681689Z caller=http.go:194 level=debug traceID=0defbbb71cfba681 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.727841ms" +ts=2024-05-02T12:17:23.706357344Z caller=http.go:194 level=debug traceID=54aa8a5a51b7d1bc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.902094ms" +ts=2024-05-02T12:17:23.706300043Z caller=http.go:194 level=debug traceID=44aa856f1f85be36 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.437633ms" +ts=2024-05-02T12:17:23.705776981Z caller=http.go:194 level=debug traceID=182a03260e0aec57 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.778139ms" +ts=2024-05-02T12:17:23.705499352Z caller=http.go:194 level=debug traceID=7fe3ca512f45389d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.994479ms" +ts=2024-05-02T12:17:23.704325107Z caller=http.go:194 level=debug traceID=7894894c5dfbd78e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.758212ms" +ts=2024-05-02T12:17:23.704000358Z caller=http.go:194 level=debug traceID=6c5b11f21ad14b33 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
1.768296ms" +ts=2024-05-02T12:17:23.704057424Z caller=http.go:194 level=debug traceID=6085aaec0f02c202 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.995367ms" +ts=2024-05-02T12:17:23.703906926Z caller=http.go:194 level=debug traceID=52e9e2d5240eff9e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.702382ms" +ts=2024-05-02T12:17:23.703406075Z caller=http.go:194 level=debug traceID=0abb22b2b2e0cad6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.52195ms" +ts=2024-05-02T12:17:23.703146927Z caller=http.go:194 level=debug traceID=31897c16aac2ac45 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.338811ms" +ts=2024-05-02T12:17:23.702270351Z caller=http.go:194 level=debug traceID=1bf074971a465a28 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.83944ms" +ts=2024-05-02T12:17:23.701633762Z caller=http.go:194 level=debug traceID=0812cf4c3c5662ad orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.796787ms" +ts=2024-05-02T12:17:23.701225962Z caller=http.go:194 level=debug traceID=07dd2d229b55e8f2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.823768ms" +ts=2024-05-02T12:17:23.700663168Z caller=http.go:194 level=debug traceID=6068c952c34cff80 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.793678ms" +ts=2024-05-02T12:17:23.700732103Z caller=http.go:194 level=debug traceID=48469ae3f50cc54f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.715026ms" +ts=2024-05-02T12:17:23.700360075Z caller=http.go:194 level=debug traceID=37629031aca2d451 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.513697ms" +ts=2024-05-02T12:17:23.700042071Z caller=http.go:194 level=debug traceID=60245fcb497cbfda orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.904357ms" +ts=2024-05-02T12:17:23.69986642Z caller=http.go:194 level=debug traceID=67c4ec4828582449 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 229.789µs" +ts=2024-05-02T12:17:23.699376083Z caller=http.go:194 level=debug traceID=6e6e8c0dbd2f9c77 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 261.302µs" +ts=2024-05-02T12:17:23.69874068Z caller=http.go:194 level=debug traceID=7113fb7dcc35d411 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.19932ms" +ts=2024-05-02T12:17:23.698484426Z caller=http.go:194 level=debug traceID=180148472d742477 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.020448ms" +ts=2024-05-02T12:17:23.698316631Z caller=http.go:194 level=debug traceID=5f9c6971feaf836a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.493476ms" +ts=2024-05-02T12:17:23.698016491Z caller=http.go:194 level=debug traceID=1a01d3a3a1088f5d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.392824ms" +ts=2024-05-02T12:17:23.697981122Z caller=http.go:194 level=debug traceID=19c10e4f2fb1a9b6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.561894ms" +ts=2024-05-02T12:17:23.697528323Z caller=http.go:194 level=debug traceID=6ed0d45cdee1de4b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.93537ms" +ts=2024-05-02T12:17:23.697491916Z caller=http.go:194 level=debug traceID=58db2b0d85d2d0bc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.248936ms" +ts=2024-05-02T12:17:23.697168455Z caller=http.go:194 level=debug traceID=4f43e8f363804416 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.72097ms" +ts=2024-05-02T12:17:23.695047952Z caller=http.go:194 level=debug traceID=44aa856f1f85be36 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.22695ms" +ts=2024-05-02T12:17:23.694596485Z caller=http.go:194 level=debug 
traceID=585981d7bc58735a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.790245ms" +ts=2024-05-02T12:17:23.693967121Z caller=http.go:194 level=debug traceID=54aa8a5a51b7d1bc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.331936ms" +ts=2024-05-02T12:17:23.693904853Z caller=http.go:194 level=debug traceID=52e9e2d5240eff9e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.605824ms" +ts=2024-05-02T12:17:23.693496202Z caller=http.go:194 level=debug traceID=0defbbb71cfba681 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.724946ms" +ts=2024-05-02T12:17:23.693302618Z caller=http.go:194 level=debug traceID=6c5b11f21ad14b33 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.643704ms" +ts=2024-05-02T12:17:23.693009071Z caller=http.go:194 level=debug traceID=21e8081d3e88f6bc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.737536ms" +ts=2024-05-02T12:17:23.692774601Z caller=http.go:194 level=debug traceID=31897c16aac2ac45 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.001428ms" +ts=2024-05-02T12:17:23.692070298Z caller=http.go:194 level=debug traceID=0abb22b2b2e0cad6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.48745ms" +ts=2024-05-02T12:17:23.692013053Z caller=http.go:194 level=debug traceID=4398f00df5e8db7b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.68491ms" +ts=2024-05-02T12:17:23.690471831Z caller=http.go:194 level=debug traceID=07dd2d229b55e8f2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.681582ms" +ts=2024-05-02T12:17:23.690232982Z caller=http.go:194 level=debug traceID=6068c952c34cff80 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.930829ms" +ts=2024-05-02T12:17:23.689684617Z caller=http.go:194 level=debug traceID=0646647e9dee3cbe orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.56947ms" +ts=2024-05-02T12:17:23.689518213Z caller=http.go:194 level=debug traceID=33b134c84b5d5ae6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.098946ms" +ts=2024-05-02T12:17:23.689473627Z caller=http.go:194 level=debug traceID=6e6e8c0dbd2f9c77 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 442.714µs" +ts=2024-05-02T12:17:23.689072686Z caller=http.go:194 level=debug traceID=2cfce98a8ea69d13 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.096067ms" +ts=2024-05-02T12:17:23.688926663Z caller=http.go:194 level=debug traceID=67c4ec4828582449 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 362.168µs" +ts=2024-05-02T12:17:23.688864333Z caller=http.go:194 level=debug traceID=31b3cae28bedf5fb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.289574ms" +ts=2024-05-02T12:17:23.687940051Z caller=http.go:194 level=debug traceID=16475e8126810614 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.055111ms" +ts=2024-05-02T12:17:23.687708647Z caller=http.go:194 level=debug traceID=7113fb7dcc35d411 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.085784ms" +ts=2024-05-02T12:17:23.687235767Z caller=http.go:194 level=debug traceID=1cf98ca251a6f01f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.329141ms" +ts=2024-05-02T12:17:23.68712782Z caller=http.go:194 level=debug traceID=180148472d742477 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.031871ms" +ts=2024-05-02T12:17:23.687097863Z caller=http.go:194 level=debug traceID=5f9c6971feaf836a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.316162ms" +ts=2024-05-02T12:17:23.686867875Z caller=http.go:194 level=debug traceID=06efaa099c75cf16 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
222.933µs" +ts=2024-05-02T12:17:23.686692884Z caller=http.go:194 level=debug traceID=1a01d3a3a1088f5d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.457276ms" +ts=2024-05-02T12:17:23.686490804Z caller=http.go:194 level=debug traceID=382f3039293384a0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.173428ms" +ts=2024-05-02T12:17:23.686170874Z caller=http.go:194 level=debug traceID=585981d7bc58735a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.657402ms" +ts=2024-05-02T12:17:23.686291446Z caller=http.go:194 level=debug traceID=6ed0d45cdee1de4b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.4287ms" +ts=2024-05-02T12:17:23.685961853Z caller=http.go:194 level=debug traceID=4f43e8f363804416 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.616187ms" +ts=2024-05-02T12:17:23.685415887Z caller=http.go:194 level=debug traceID=4fdf975da9008a14 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.989626ms" +ts=2024-05-02T12:17:23.683924518Z caller=http.go:194 level=debug traceID=4fad4ff7df4ab1fb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.732423ms" +ts=2024-05-02T12:17:23.68382329Z caller=http.go:194 level=debug traceID=73b4c9d48aa5b46d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.014095ms" +ts=2024-05-02T12:17:23.683175776Z caller=http.go:194 level=debug traceID=67ea916bb18a4311 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.395228ms" +ts=2024-05-02T12:17:23.68274598Z caller=http.go:194 level=debug traceID=5671d6a96d579192 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.693848ms" +ts=2024-05-02T12:17:23.682694958Z caller=http.go:194 level=debug traceID=13fd719a196d9fc3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.551573ms" +ts=2024-05-02T12:17:23.682556301Z caller=http.go:194 level=debug traceID=272c72a4dc4f399e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.86331ms" +ts=2024-05-02T12:17:23.681957563Z caller=http.go:194 level=debug traceID=21e8081d3e88f6bc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.829747ms" +ts=2024-05-02T12:17:23.681780669Z caller=http.go:194 level=debug traceID=1a37e02fb44a0161 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.12245ms" +ts=2024-05-02T12:17:23.680333213Z caller=http.go:194 level=debug traceID=4398f00df5e8db7b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.56324ms" +ts=2024-05-02T12:17:23.679743799Z caller=http.go:194 level=debug traceID=50bd3c533b4afd39 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.748042ms" +ts=2024-05-02T12:17:23.679757026Z caller=http.go:194 level=debug traceID=0646647e9dee3cbe orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.510917ms" +ts=2024-05-02T12:17:23.679711068Z caller=http.go:194 level=debug traceID=2f760df896e2edea orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.286753ms" +ts=2024-05-02T12:17:23.679139669Z caller=http.go:194 level=debug traceID=2cfce98a8ea69d13 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.151282ms" +ts=2024-05-02T12:17:23.678191667Z caller=http.go:194 level=debug traceID=31b3cae28bedf5fb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.763662ms" +ts=2024-05-02T12:17:23.677577771Z caller=http.go:194 level=debug traceID=33b134c84b5d5ae6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.653407ms" +ts=2024-05-02T12:17:23.677056103Z caller=http.go:194 level=debug traceID=4cc2cf6198bd65d6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.712341ms" +ts=2024-05-02T12:17:23.676508661Z caller=http.go:194 level=debug 
traceID=06efaa099c75cf16 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 291.393µs" +ts=2024-05-02T12:17:23.676247838Z caller=http.go:194 level=debug traceID=78541976ee457cbf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.932997ms" +ts=2024-05-02T12:17:23.67621879Z caller=http.go:194 level=debug traceID=16475e8126810614 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.890184ms" +ts=2024-05-02T12:17:23.676228636Z caller=http.go:194 level=debug traceID=1cf98ca251a6f01f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.121174ms" +ts=2024-05-02T12:17:23.676194085Z caller=http.go:194 level=debug traceID=382f3039293384a0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.975717ms" +ts=2024-05-02T12:17:23.675824971Z caller=http.go:194 level=debug traceID=392f664d8c742afd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.836991ms" +ts=2024-05-02T12:17:23.674890848Z caller=http.go:194 level=debug traceID=4fad4ff7df4ab1fb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.151274ms" +ts=2024-05-02T12:17:23.674261843Z caller=http.go:194 level=debug traceID=7d623b5122d41579 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 319.093µs" +ts=2024-05-02T12:17:23.674063673Z caller=http.go:194 level=debug traceID=4fdf975da9008a14 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.56407ms" +ts=2024-05-02T12:17:23.673911275Z caller=http.go:194 level=debug traceID=4633e7e79f88a53a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.010043ms" +ts=2024-05-02T12:17:23.673389877Z caller=http.go:194 level=debug traceID=67ea916bb18a4311 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.757013ms" +ts=2024-05-02T12:17:23.673363043Z caller=http.go:194 level=debug traceID=2a3dfcd79fc65364 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.401467ms" +ts=2024-05-02T12:17:23.673140343Z caller=http.go:194 level=debug traceID=13fd719a196d9fc3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.658578ms" +ts=2024-05-02T12:17:23.673054735Z caller=http.go:194 level=debug traceID=4ce0ffff27af3340 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.855246ms" +ts=2024-05-02T12:17:23.672474495Z caller=http.go:194 level=debug traceID=73b4c9d48aa5b46d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.702529ms" +ts=2024-05-02T12:17:23.67135137Z caller=http.go:194 level=debug traceID=5671d6a96d579192 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.321218ms" +ts=2024-05-02T12:17:23.671281474Z caller=http.go:194 level=debug traceID=272c72a4dc4f399e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.820734ms" +ts=2024-05-02T12:17:23.671261657Z caller=http.go:194 level=debug traceID=1c58a52122bde990 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.760868ms" +ts=2024-05-02T12:17:23.670303056Z caller=http.go:194 level=debug traceID=50bd3c533b4afd39 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.354358ms" +ts=2024-05-02T12:17:23.670350884Z caller=http.go:194 level=debug traceID=1a37e02fb44a0161 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.17071ms" +ts=2024-05-02T12:17:23.670335586Z caller=http.go:194 level=debug traceID=0a26da542ac4e7c0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.07234ms" +ts=2024-05-02T12:17:23.670076089Z caller=http.go:194 level=debug traceID=3120a109152dbf4a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.060604ms" +ts=2024-05-02T12:17:23.668213648Z caller=http.go:194 level=debug traceID=1382ece61c3fbd88 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
6.070473ms" +ts=2024-05-02T12:17:23.666215085Z caller=http.go:194 level=debug traceID=2f760df896e2edea orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.078822ms" +ts=2024-05-02T12:17:23.666156999Z caller=http.go:194 level=debug traceID=32d34a396308ab05 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.766702ms" +ts=2024-05-02T12:17:23.665773658Z caller=http.go:194 level=debug traceID=0813bf849bb2217c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.280919ms" +ts=2024-05-02T12:17:23.66575617Z caller=http.go:194 level=debug traceID=4cc2cf6198bd65d6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.503466ms" +ts=2024-05-02T12:17:23.665612742Z caller=http.go:194 level=debug traceID=392f664d8c742afd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.676098ms" +ts=2024-05-02T12:17:23.665025035Z caller=http.go:194 level=debug traceID=78541976ee457cbf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.923686ms" +ts=2024-05-02T12:17:23.664782233Z caller=http.go:194 level=debug traceID=7ae796f0d903dcee orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.103959ms" +ts=2024-05-02T12:17:23.664703614Z caller=http.go:194 level=debug traceID=57f487225835be5e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.470103ms" +ts=2024-05-02T12:17:23.66381963Z caller=http.go:194 level=debug traceID=7d623b5122d41579 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 257.877µs" +ts=2024-05-02T12:17:23.663256553Z caller=http.go:194 level=debug traceID=4633e7e79f88a53a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.906069ms" +ts=2024-05-02T12:17:23.662554718Z caller=http.go:194 level=debug traceID=2a3dfcd79fc65364 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.081101ms" +ts=2024-05-02T12:17:23.662027045Z caller=http.go:194 level=debug traceID=4ce0ffff27af3340 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.890441ms" +ts=2024-05-02T12:17:23.660643826Z caller=http.go:194 level=debug traceID=4cb274f580252adf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.587853ms" +ts=2024-05-02T12:17:23.660259619Z caller=http.go:194 level=debug traceID=3dd31d9c25c11bbd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.59411ms" +ts=2024-05-02T12:17:23.660206983Z caller=http.go:194 level=debug traceID=65bf7268f86f080b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.24995ms" +ts=2024-05-02T12:17:23.65999487Z caller=http.go:194 level=debug traceID=1382ece61c3fbd88 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.61483ms" +ts=2024-05-02T12:17:23.65956265Z caller=http.go:194 level=debug traceID=13761f1e29a4b537 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.639016ms" +ts=2024-05-02T12:17:23.659442201Z caller=http.go:194 level=debug traceID=1c58a52122bde990 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.378315ms" +ts=2024-05-02T12:17:23.659365376Z caller=http.go:194 level=debug traceID=3120a109152dbf4a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.67398ms" +ts=2024-05-02T12:17:23.658741226Z caller=http.go:194 level=debug traceID=0a26da542ac4e7c0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.665276ms" +ts=2024-05-02T12:17:23.658220281Z caller=http.go:194 level=debug traceID=0b7d3247d2584bd8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.050871ms" +ts=2024-05-02T12:17:23.658015627Z caller=http.go:194 level=debug traceID=76a28ee5e5ad4998 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.19256ms" +ts=2024-05-02T12:17:23.657390423Z caller=http.go:194 level=debug 
traceID=554eac1b598f19cc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.231802ms" +ts=2024-05-02T12:17:23.657291858Z caller=http.go:194 level=debug traceID=57ebcdcb5ee97755 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.526742ms" +ts=2024-05-02T12:17:23.656928551Z caller=http.go:194 level=debug traceID=797199dfb85d96c2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.374508ms" +ts=2024-05-02T12:17:23.656027338Z caller=http.go:194 level=debug traceID=5a04a631ee0d5f24 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.167308ms" +ts=2024-05-02T12:17:23.655280403Z caller=http.go:194 level=debug traceID=103aa6e85e59d154 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.064432ms" +ts=2024-05-02T12:17:23.655054703Z caller=http.go:194 level=debug traceID=32d34a396308ab05 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.723404ms" +ts=2024-05-02T12:17:23.65462161Z caller=http.go:194 level=debug traceID=0813bf849bb2217c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.703731ms" +ts=2024-05-02T12:17:23.65410332Z caller=http.go:194 level=debug traceID=57f487225835be5e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.873464ms" +ts=2024-05-02T12:17:23.653707076Z caller=http.go:194 level=debug traceID=57b2cb1d1cc7a632 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.306518ms" +ts=2024-05-02T12:17:23.653201304Z caller=http.go:194 level=debug traceID=7ae796f0d903dcee orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.860086ms" +ts=2024-05-02T12:17:23.650150022Z caller=http.go:194 level=debug traceID=65bf7268f86f080b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.295743ms" +ts=2024-05-02T12:17:23.649815315Z caller=http.go:194 level=debug traceID=36a94189114ad931 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.066351ms" +ts=2024-05-02T12:17:23.649579484Z caller=http.go:194 level=debug traceID=76a28ee5e5ad4998 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.77794ms" +ts=2024-05-02T12:17:23.649253254Z caller=http.go:194 level=debug traceID=723b8d7798b98c3f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.378254ms" +ts=2024-05-02T12:17:23.648954199Z caller=http.go:194 level=debug traceID=59edebac8ae181ed orgID=3648 msg="POST /push.v1.PusherService/Push (200) 263.163µs" +ts=2024-05-02T12:17:23.648723715Z caller=http.go:194 level=debug traceID=4cb274f580252adf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.727639ms" +ts=2024-05-02T12:17:23.648436892Z caller=http.go:194 level=debug traceID=0add818c98f1caef orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.344704ms" +ts=2024-05-02T12:17:23.648004675Z caller=http.go:194 level=debug traceID=13761f1e29a4b537 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.346638ms" +ts=2024-05-02T12:17:23.647532877Z caller=http.go:194 level=debug traceID=3dd31d9c25c11bbd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.913482ms" +ts=2024-05-02T12:17:23.647017707Z caller=http.go:194 level=debug traceID=554eac1b598f19cc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.040932ms" +ts=2024-05-02T12:17:23.64691365Z caller=http.go:194 level=debug traceID=0b7d3247d2584bd8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.237791ms" +ts=2024-05-02T12:17:23.646432536Z caller=http.go:194 level=debug traceID=57b2cb1d1cc7a632 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.257586ms" +ts=2024-05-02T12:17:23.646206598Z caller=http.go:194 level=debug traceID=2c62e2d353b33abe orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
2.584725ms" +ts=2024-05-02T12:17:23.646007946Z caller=http.go:194 level=debug traceID=7b955f835be0d598 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.8996ms" +ts=2024-05-02T12:17:23.645903013Z caller=http.go:194 level=debug traceID=797199dfb85d96c2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.427068ms" +ts=2024-05-02T12:17:23.645653538Z caller=http.go:194 level=debug traceID=57ebcdcb5ee97755 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.350293ms" +ts=2024-05-02T12:17:23.645338881Z caller=http.go:194 level=debug traceID=30cba878ffb8553f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.952247ms" +ts=2024-05-02T12:17:23.644945257Z caller=http.go:194 level=debug traceID=5a04a631ee0d5f24 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.612246ms" +ts=2024-05-02T12:17:23.644536426Z caller=http.go:194 level=debug traceID=11717092aae45b7a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.046005ms" +ts=2024-05-02T12:17:23.643315898Z caller=http.go:194 level=debug traceID=103aa6e85e59d154 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.976513ms" +ts=2024-05-02T12:17:23.641407683Z caller=http.go:194 level=debug traceID=71b3655de3bdbe9f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.020505ms" +ts=2024-05-02T12:17:23.64045028Z caller=http.go:194 level=debug traceID=36a94189114ad931 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.442ms" +ts=2024-05-02T12:17:23.64008408Z caller=http.go:194 level=debug traceID=62ce9272d916e901 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 227.468µs" +ts=2024-05-02T12:17:23.63935439Z caller=http.go:194 level=debug traceID=5cc0c7e36f7a7cb1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.808792ms" +ts=2024-05-02T12:17:23.638975833Z caller=http.go:194 level=debug traceID=59edebac8ae181ed orgID=1218 msg="POST /push.v1.PusherService/Push (200) 403.677µs" +ts=2024-05-02T12:17:23.638586555Z caller=http.go:194 level=debug traceID=0add818c98f1caef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.717507ms" +ts=2024-05-02T12:17:23.638015982Z caller=http.go:194 level=debug traceID=7bf88f8902e656e1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 16.660752ms" +ts=2024-05-02T12:17:23.637738173Z caller=http.go:194 level=debug traceID=5d1e330b9c6ead23 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.882951ms" +ts=2024-05-02T12:17:23.637350481Z caller=http.go:194 level=debug traceID=723b8d7798b98c3f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.185731ms" +ts=2024-05-02T12:17:23.637106682Z caller=http.go:194 level=debug traceID=2c62e2d353b33abe orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.68658ms" +ts=2024-05-02T12:17:23.635380451Z caller=http.go:194 level=debug traceID=30cba878ffb8553f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.979279ms" +ts=2024-05-02T12:17:23.635241293Z caller=http.go:194 level=debug traceID=138d376eb29fc56c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 304.082µs" +ts=2024-05-02T12:17:23.63446025Z caller=http.go:194 level=debug traceID=7b955f835be0d598 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.10084ms" +ts=2024-05-02T12:17:23.633586655Z caller=http.go:194 level=debug traceID=11717092aae45b7a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.563652ms" +ts=2024-05-02T12:17:23.633496601Z caller=http.go:194 level=debug traceID=2b1ba65744e671b5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.894253ms" +ts=2024-05-02T12:17:23.632849614Z caller=http.go:194 level=debug 
traceID=3dfa130ee009e903 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.180098ms" +ts=2024-05-02T12:17:23.632677918Z caller=http.go:194 level=debug traceID=49ec6c1740c51fd7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.386464ms" +ts=2024-05-02T12:17:23.632323373Z caller=http.go:194 level=debug traceID=2cd11127c46bd62e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.711308ms" +ts=2024-05-02T12:17:23.631361128Z caller=http.go:194 level=debug traceID=71b3655de3bdbe9f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.000831ms" +ts=2024-05-02T12:17:23.631246926Z caller=http.go:194 level=debug traceID=12000fabebb183f1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.615503ms" +ts=2024-05-02T12:17:23.629384259Z caller=http.go:194 level=debug traceID=62ce9272d916e901 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 378.906µs" +ts=2024-05-02T12:17:23.628327936Z caller=http.go:194 level=debug traceID=0f42182f07ab87ff orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.256662ms" +ts=2024-05-02T12:17:23.62756472Z caller=http.go:194 level=debug traceID=5d1e330b9c6ead23 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.925161ms" +ts=2024-05-02T12:17:23.627480496Z caller=http.go:194 level=debug traceID=5cc0c7e36f7a7cb1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.622342ms" +ts=2024-05-02T12:17:23.627299951Z caller=http.go:194 level=debug traceID=629603869a2d0580 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.958054ms" +ts=2024-05-02T12:17:23.625145186Z caller=http.go:194 level=debug traceID=764e52ff213f7c06 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.028198ms" +ts=2024-05-02T12:17:23.624142328Z caller=http.go:194 level=debug traceID=29c042691f7950b4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.114846ms" +ts=2024-05-02T12:17:23.62381625Z caller=http.go:194 level=debug traceID=138d376eb29fc56c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 389.104µs" +ts=2024-05-02T12:17:23.622997855Z caller=http.go:194 level=debug traceID=2b55ebb3da48e553 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.276969ms" +ts=2024-05-02T12:17:23.622703428Z caller=http.go:194 level=debug traceID=2b1ba65744e671b5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.275505ms" +ts=2024-05-02T12:17:23.622212334Z caller=http.go:194 level=debug traceID=51fc94ea7e707c90 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.112813ms" +ts=2024-05-02T12:17:23.62218483Z caller=http.go:194 level=debug traceID=641a055a70f56e18 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.38875ms" +ts=2024-05-02T12:17:23.622027107Z caller=http.go:194 level=debug traceID=3dfa130ee009e903 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.739593ms" +ts=2024-05-02T12:17:23.621983604Z caller=http.go:194 level=debug traceID=49ec6c1740c51fd7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.336077ms" +ts=2024-05-02T12:17:23.620631514Z caller=http.go:194 level=debug traceID=12000fabebb183f1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.180478ms" +ts=2024-05-02T12:17:23.620333412Z caller=http.go:194 level=debug traceID=2cd11127c46bd62e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.303712ms" +ts=2024-05-02T12:17:23.620188082Z caller=http.go:194 level=debug traceID=7bf88f8902e656e1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 20.629404ms" +ts=2024-05-02T12:17:23.61969442Z caller=http.go:194 level=debug traceID=2b74966eaee8c838 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
6.063936ms" +ts=2024-05-02T12:17:23.619706046Z caller=http.go:194 level=debug traceID=67d551266581f34b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.210519ms" +ts=2024-05-02T12:17:23.619190012Z caller=http.go:194 level=debug traceID=0b2974cd8199707e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.528159ms" +ts=2024-05-02T12:17:23.618706141Z caller=http.go:194 level=debug traceID=0f42182f07ab87ff orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.632228ms" +ts=2024-05-02T12:17:23.618055558Z caller=http.go:194 level=debug traceID=322ad33eafa6324d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.518868ms" +ts=2024-05-02T12:17:23.617256427Z caller=http.go:194 level=debug traceID=793f125fc57e69e3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.427593ms" +ts=2024-05-02T12:17:23.617130318Z caller=http.go:194 level=debug traceID=5770c384ad3924c8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.710606ms" +ts=2024-05-02T12:17:23.616294356Z caller=http.go:194 level=debug traceID=5a78190e94f5c1eb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.918505ms" +ts=2024-05-02T12:17:23.616149174Z caller=http.go:194 level=debug traceID=629603869a2d0580 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.967178ms" +ts=2024-05-02T12:17:23.614283677Z caller=http.go:194 level=debug traceID=30d953e2aebd5d75 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.58415ms" +ts=2024-05-02T12:17:23.613690494Z caller=http.go:194 level=debug traceID=764e52ff213f7c06 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.776387ms" +ts=2024-05-02T12:17:23.613247904Z caller=http.go:194 level=debug traceID=498aee689b01c781 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.739466ms" +ts=2024-05-02T12:17:23.613212809Z caller=http.go:194 level=debug traceID=0d09e04d49bf67af orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.881196ms" +ts=2024-05-02T12:17:23.612900937Z caller=http.go:194 level=debug traceID=40b70e1dc44bc69a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.713456ms" +ts=2024-05-02T12:17:23.612740476Z caller=http.go:194 level=debug traceID=3393fef387fe72fe orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.68967ms" +ts=2024-05-02T12:17:23.61255128Z caller=http.go:194 level=debug traceID=29c042691f7950b4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.037627ms" +ts=2024-05-02T12:17:23.612485637Z caller=http.go:194 level=debug traceID=3eed0cfc0294f699 orgID=3648 msg="POST /push.v1.PusherService/Push (400) 90.882µs" +ts=2024-05-02T12:17:23.612204581Z caller=http.go:194 level=debug traceID=51fc94ea7e707c90 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.497799ms" +ts=2024-05-02T12:17:23.61108107Z caller=http.go:194 level=debug traceID=2b55ebb3da48e553 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.8668ms" +ts=2024-05-02T12:17:23.610657444Z caller=http.go:194 level=debug traceID=641a055a70f56e18 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.112421ms" +ts=2024-05-02T12:17:23.609143279Z caller=http.go:194 level=debug traceID=0b2974cd8199707e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.011154ms" +ts=2024-05-02T12:17:23.608979853Z caller=http.go:194 level=debug traceID=67d551266581f34b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.502566ms" +ts=2024-05-02T12:17:23.608824604Z caller=http.go:194 level=debug traceID=1c5d54d97f13c174 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.957659ms" +ts=2024-05-02T12:17:23.6075915Z caller=http.go:194 level=debug 
traceID=2b74966eaee8c838 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.883091ms" +ts=2024-05-02T12:17:23.607516309Z caller=http.go:194 level=debug traceID=6d80cd7d732b6a49 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.261643ms" +ts=2024-05-02T12:17:23.607491167Z caller=http.go:194 level=debug traceID=6aef0473185f2fb1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 258.143µs" +ts=2024-05-02T12:17:23.606804093Z caller=http.go:194 level=debug traceID=322ad33eafa6324d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.683901ms" +ts=2024-05-02T12:17:23.606631505Z caller=http.go:194 level=debug traceID=247cc58f1b84ae1d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.985548ms" +ts=2024-05-02T12:17:23.606406954Z caller=http.go:194 level=debug traceID=793f125fc57e69e3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.834163ms" +ts=2024-05-02T12:17:23.606133764Z caller=http.go:194 level=debug traceID=3361fe85040bfdd3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.66261ms" +ts=2024-05-02T12:17:23.605520637Z caller=http.go:194 level=debug traceID=5770c384ad3924c8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.674338ms" +ts=2024-05-02T12:17:23.605077175Z caller=http.go:194 level=debug traceID=5a78190e94f5c1eb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.915133ms" +ts=2024-05-02T12:17:23.604459993Z caller=http.go:194 level=debug traceID=40b5ffd28c76778a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.755357ms" +ts=2024-05-02T12:17:23.604213569Z caller=http.go:194 level=debug traceID=09fd51465627e3f9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.142815ms" +ts=2024-05-02T12:17:23.603385263Z caller=http.go:194 level=debug traceID=2de5d2596da6a012 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.446991ms" +ts=2024-05-02T12:17:23.603338423Z caller=http.go:194 level=debug traceID=3b823dedd54e73f5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.164788ms" +ts=2024-05-02T12:17:23.602573014Z caller=http.go:194 level=debug traceID=30d953e2aebd5d75 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.121321ms" +ts=2024-05-02T12:17:23.602389947Z caller=http.go:194 level=debug traceID=18d884ac073bc139 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.301387ms" +ts=2024-05-02T12:17:23.602143954Z caller=http.go:194 level=debug traceID=3393fef387fe72fe orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.141066ms" +ts=2024-05-02T12:17:23.601936735Z caller=http.go:194 level=debug traceID=498aee689b01c781 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.900907ms" +ts=2024-05-02T12:17:23.601885048Z caller=http.go:194 level=debug traceID=40b70e1dc44bc69a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.458847ms" +ts=2024-05-02T12:17:23.601516451Z caller=http.go:194 level=debug traceID=3eed0cfc0294f699 orgID=1218 msg="POST /push.v1.PusherService/Push (400) 202.77µs" +ts=2024-05-02T12:17:23.601451781Z caller=http.go:194 level=debug traceID=0d09e04d49bf67af orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.506645ms" +ts=2024-05-02T12:17:23.601295766Z caller=http.go:194 level=debug traceID=6d99746281ec303e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.523288ms" +ts=2024-05-02T12:17:23.598084428Z caller=http.go:194 level=debug traceID=1f1f1b64e906657a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.602771ms" +ts=2024-05-02T12:17:23.597727261Z caller=http.go:194 level=debug traceID=6aef0473185f2fb1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
344.36µs" +ts=2024-05-02T12:17:23.597560884Z caller=http.go:194 level=debug traceID=6d80cd7d732b6a49 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.072162ms" +ts=2024-05-02T12:17:23.597551496Z caller=http.go:194 level=debug traceID=1c5d54d97f13c174 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.009054ms" +ts=2024-05-02T12:17:23.597279218Z caller=http.go:194 level=debug traceID=4088a321ed2d2876 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.515604ms" +ts=2024-05-02T12:17:23.596881589Z caller=http.go:194 level=debug traceID=2af9801a4bd7ff98 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.032904ms" +ts=2024-05-02T12:17:23.596874363Z caller=http.go:194 level=debug traceID=1a1a534d5ce81f21 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.325299ms" +ts=2024-05-02T12:17:23.596112688Z caller=http.go:194 level=debug traceID=49fc863ea90cfb93 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.790025ms" +ts=2024-05-02T12:17:23.595635252Z caller=http.go:194 level=debug traceID=3361fe85040bfdd3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.720442ms" +ts=2024-05-02T12:17:23.595544502Z caller=http.go:194 level=debug traceID=247cc58f1b84ae1d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.558768ms" +ts=2024-05-02T12:17:23.595091052Z caller=http.go:194 level=debug traceID=30626e7fe82189a3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.165975ms" +ts=2024-05-02T12:17:23.593965468Z caller=http.go:194 level=debug traceID=40b5ffd28c76778a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.733341ms" +ts=2024-05-02T12:17:23.593501237Z caller=http.go:194 level=debug traceID=3b823dedd54e73f5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.302384ms" +ts=2024-05-02T12:17:23.593073207Z caller=http.go:194 level=debug traceID=09fd51465627e3f9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.863893ms" +ts=2024-05-02T12:17:23.592225858Z caller=http.go:194 level=debug traceID=2de5d2596da6a012 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.631817ms" +ts=2024-05-02T12:17:23.591761814Z caller=http.go:194 level=debug traceID=6d42a417de570ec4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.701827ms" +ts=2024-05-02T12:17:23.591467441Z caller=http.go:194 level=debug traceID=18d884ac073bc139 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.691622ms" +ts=2024-05-02T12:17:23.5903698Z caller=http.go:194 level=debug traceID=1dcb5aa4775b3bf7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.266616ms" +ts=2024-05-02T12:17:23.590075966Z caller=http.go:194 level=debug traceID=3fa54868501a9941 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.702846ms" +ts=2024-05-02T12:17:23.58976551Z caller=http.go:194 level=debug traceID=6d99746281ec303e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.763791ms" +ts=2024-05-02T12:17:23.589223443Z caller=http.go:194 level=debug traceID=62b9e0c02cca97c3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.538589ms" +ts=2024-05-02T12:17:23.588558005Z caller=http.go:194 level=debug traceID=6611089a5229c8df orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.53775ms" +ts=2024-05-02T12:17:23.588037145Z caller=http.go:194 level=debug traceID=6f080bf86b834d59 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.593127ms" +ts=2024-05-02T12:17:23.586929292Z caller=http.go:194 level=debug traceID=4088a321ed2d2876 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.079751ms" +ts=2024-05-02T12:17:23.586370211Z caller=http.go:194 level=debug 
traceID=1f1f1b64e906657a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.472763ms" +ts=2024-05-02T12:17:23.585958128Z caller=http.go:194 level=debug traceID=1a1a534d5ce81f21 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.650795ms" +ts=2024-05-02T12:17:23.585208215Z caller=http.go:194 level=debug traceID=2af9801a4bd7ff98 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.448811ms" +ts=2024-05-02T12:17:23.584752895Z caller=http.go:194 level=debug traceID=49fc863ea90cfb93 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.944497ms" +ts=2024-05-02T12:17:23.584508675Z caller=http.go:194 level=debug traceID=62877c4f99acb2a7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.964453ms" +ts=2024-05-02T12:17:23.584357103Z caller=http.go:194 level=debug traceID=43e4157d0336b0e6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.202908ms" +ts=2024-05-02T12:17:23.584319958Z caller=http.go:194 level=debug traceID=5e5cd04f64c57295 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.164265ms" +ts=2024-05-02T12:17:23.583311807Z caller=http.go:194 level=debug traceID=2bb18fd9c6524dca orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.734152ms" +ts=2024-05-02T12:17:23.583191539Z caller=http.go:194 level=debug traceID=4bd91ad8cbe772a9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.584501ms" +ts=2024-05-02T12:17:23.582838133Z caller=http.go:194 level=debug traceID=2d365ce866e5da9a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.904574ms" +ts=2024-05-02T12:17:23.58274881Z caller=http.go:194 level=debug traceID=30626e7fe82189a3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.268326ms" +ts=2024-05-02T12:17:23.582587449Z caller=http.go:194 level=debug traceID=6d42a417de570ec4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.702376ms" +ts=2024-05-02T12:17:23.581446314Z caller=http.go:194 level=debug traceID=7d9f015f8b2a852a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.440629ms" +ts=2024-05-02T12:17:23.581511534Z caller=http.go:194 level=debug traceID=1679af5dcc49f45f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.101104ms" +ts=2024-05-02T12:17:23.581218645Z caller=http.go:194 level=debug traceID=42953649a479ac3b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.469278ms" +ts=2024-05-02T12:17:23.580678313Z caller=http.go:194 level=debug traceID=1dcb5aa4775b3bf7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.645461ms" +ts=2024-05-02T12:17:23.57961088Z caller=http.go:194 level=debug traceID=5d887658ba7492e5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.584308ms" +ts=2024-05-02T12:17:23.5795287Z caller=http.go:194 level=debug traceID=7d427fabd4b47e61 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.695354ms" +ts=2024-05-02T12:17:23.579049971Z caller=http.go:194 level=debug traceID=62b9e0c02cca97c3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.412078ms" +ts=2024-05-02T12:17:23.578207882Z caller=http.go:194 level=debug traceID=3fa54868501a9941 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.096082ms" +ts=2024-05-02T12:17:23.578138436Z caller=http.go:194 level=debug traceID=6611089a5229c8df orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.088827ms" +ts=2024-05-02T12:17:23.577573618Z caller=http.go:194 level=debug traceID=08831d38ad87ef64 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.809467ms" +ts=2024-05-02T12:17:23.576978489Z caller=http.go:194 level=debug traceID=6f080bf86b834d59 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
2.189215ms" +ts=2024-05-02T12:17:23.575901269Z caller=http.go:194 level=debug traceID=6c66905b3991c272 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.523352ms" +ts=2024-05-02T12:17:23.575536661Z caller=http.go:194 level=debug traceID=2004e547a69e1956 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.164605ms" +ts=2024-05-02T12:17:23.575180684Z caller=http.go:194 level=debug traceID=2742166193f5f214 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.489179ms" +ts=2024-05-02T12:17:23.574516811Z caller=http.go:194 level=debug traceID=5e5cd04f64c57295 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.467983ms" +ts=2024-05-02T12:17:23.574151955Z caller=http.go:194 level=debug traceID=43e4157d0336b0e6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.06374ms" +ts=2024-05-02T12:17:23.57417934Z caller=http.go:194 level=debug traceID=064ec23c5dd43a01 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.600803ms" +ts=2024-05-02T12:17:23.573590963Z caller=http.go:194 level=debug traceID=62877c4f99acb2a7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.298443ms" +ts=2024-05-02T12:17:23.573627547Z caller=http.go:194 level=debug traceID=2b8354b0a53aaa00 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.746582ms" +ts=2024-05-02T12:17:23.573234683Z caller=http.go:194 level=debug traceID=2bb18fd9c6524dca orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.122815ms" +ts=2024-05-02T12:17:23.572358888Z caller=http.go:194 level=debug traceID=21126559e76ee03f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.373187ms" +ts=2024-05-02T12:17:23.571593714Z caller=http.go:194 level=debug traceID=4bd91ad8cbe772a9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.488786ms" +ts=2024-05-02T12:17:23.571581039Z caller=http.go:194 level=debug traceID=2d365ce866e5da9a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.541454ms" +ts=2024-05-02T12:17:23.570764231Z caller=http.go:194 level=debug traceID=42953649a479ac3b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.906803ms" +ts=2024-05-02T12:17:23.570694254Z caller=http.go:194 level=debug traceID=1679af5dcc49f45f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.481632ms" +ts=2024-05-02T12:17:23.569624998Z caller=http.go:194 level=debug traceID=32162d97a914b6bc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.389925ms" +ts=2024-05-02T12:17:23.569462891Z caller=http.go:194 level=debug traceID=6422e6e881bc007d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.089399ms" +ts=2024-05-02T12:17:23.569420746Z caller=http.go:194 level=debug traceID=738f1291e0c8f7b6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.421612ms" +ts=2024-05-02T12:17:23.5692729Z caller=http.go:194 level=debug traceID=5368b452d5e366ea orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.122425ms" +ts=2024-05-02T12:17:23.569105488Z caller=http.go:194 level=debug traceID=7d427fabd4b47e61 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.770779ms" +ts=2024-05-02T12:17:23.569042498Z caller=http.go:194 level=debug traceID=1bd34bbe4f4848f8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.018141ms" +ts=2024-05-02T12:17:23.569140332Z caller=http.go:194 level=debug traceID=7d9f015f8b2a852a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.043403ms" +ts=2024-05-02T12:17:23.56842852Z caller=http.go:194 level=debug traceID=5d887658ba7492e5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.91855ms" +ts=2024-05-02T12:17:23.567726043Z caller=http.go:194 level=debug 
traceID=74e4c10b481cca5a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.964201ms" +ts=2024-05-02T12:17:23.567578787Z caller=http.go:194 level=debug traceID=16fabb6735f7cdd9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.638174ms" +ts=2024-05-02T12:17:23.566831604Z caller=http.go:194 level=debug traceID=1f343c9a525a7078 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.805295ms" +ts=2024-05-02T12:17:23.566240176Z caller=http.go:194 level=debug traceID=08831d38ad87ef64 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.610885ms" +ts=2024-05-02T12:17:23.56520921Z caller=http.go:194 level=debug traceID=6c66905b3991c272 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.905831ms" +ts=2024-05-02T12:17:23.564902839Z caller=http.go:194 level=debug traceID=2742166193f5f214 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.879382ms" +ts=2024-05-02T12:17:23.564563822Z caller=http.go:194 level=debug traceID=2004e547a69e1956 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.546792ms" +ts=2024-05-02T12:17:23.564229575Z caller=http.go:194 level=debug traceID=5f77bd127a100563 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.106726ms" +ts=2024-05-02T12:17:23.563482864Z caller=http.go:194 level=debug traceID=7d283ecc8fe0deb9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.633245ms" +ts=2024-05-02T12:17:23.56344384Z caller=http.go:194 level=debug traceID=2b8354b0a53aaa00 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.619608ms" +ts=2024-05-02T12:17:23.563112544Z caller=http.go:194 level=debug traceID=064ec23c5dd43a01 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.918033ms" +ts=2024-05-02T12:17:23.562811116Z caller=http.go:194 level=debug traceID=5e7316c7419919a9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.131698ms" +ts=2024-05-02T12:17:23.562846827Z caller=http.go:194 level=debug traceID=4797d9a941209c2f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.603469ms" +ts=2024-05-02T12:17:23.562276474Z caller=http.go:194 level=debug traceID=44fe2614cb3d7864 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.79038ms" +ts=2024-05-02T12:17:23.561277064Z caller=http.go:194 level=debug traceID=471e28cbede89a3b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.816148ms" +ts=2024-05-02T12:17:23.560711007Z caller=http.go:194 level=debug traceID=558781b19bed5189 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.545683ms" +ts=2024-05-02T12:17:23.560529043Z caller=http.go:194 level=debug traceID=4165499d19a7d823 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.345285ms" +ts=2024-05-02T12:17:23.55990227Z caller=http.go:194 level=debug traceID=21126559e76ee03f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.099451ms" +ts=2024-05-02T12:17:23.559277998Z caller=http.go:194 level=debug traceID=738f1291e0c8f7b6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.111525ms" +ts=2024-05-02T12:17:23.559139921Z caller=http.go:194 level=debug traceID=3563025ae6e25623 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.510307ms" +ts=2024-05-02T12:17:23.558927934Z caller=http.go:194 level=debug traceID=32162d97a914b6bc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.886022ms" +ts=2024-05-02T12:17:23.558894356Z caller=http.go:194 level=debug traceID=425b60155cb2cc69 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.622069ms" +ts=2024-05-02T12:17:23.558535213Z caller=http.go:194 level=debug traceID=3c2fd37e49c407e0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
1.63674ms" +ts=2024-05-02T12:17:23.558460374Z caller=http.go:194 level=debug traceID=0a25380f13252468 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.271572ms" +ts=2024-05-02T12:17:23.558299842Z caller=http.go:194 level=debug traceID=6422e6e881bc007d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.975052ms" +ts=2024-05-02T12:17:23.558210706Z caller=http.go:194 level=debug traceID=7f11ba96b3f73b47 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.305607ms" +ts=2024-05-02T12:17:23.557463056Z caller=http.go:194 level=debug traceID=74e4c10b481cca5a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.151724ms" +ts=2024-05-02T12:17:23.55724729Z caller=http.go:194 level=debug traceID=5368b452d5e366ea orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.796642ms" +ts=2024-05-02T12:17:23.556901757Z caller=http.go:194 level=debug traceID=5f362de652791758 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.222309ms" +ts=2024-05-02T12:17:23.556217915Z caller=http.go:194 level=debug traceID=16fabb6735f7cdd9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.892346ms" +ts=2024-05-02T12:17:23.556109882Z caller=http.go:194 level=debug traceID=1f343c9a525a7078 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.172636ms" +ts=2024-05-02T12:17:23.555423966Z caller=http.go:194 level=debug traceID=1bd34bbe4f4848f8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.027451ms" +ts=2024-05-02T12:17:23.554945288Z caller=http.go:194 level=debug traceID=5f77bd127a100563 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.915676ms" +ts=2024-05-02T12:17:23.553946365Z caller=http.go:194 level=debug traceID=205f51fbe29a6ac2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.876823ms" +ts=2024-05-02T12:17:23.553552382Z caller=http.go:194 level=debug traceID=0870bf719dbf0811 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.788955ms" +ts=2024-05-02T12:17:23.553324585Z caller=http.go:194 level=debug traceID=1ea4a4eaa9329170 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.569011ms" +ts=2024-05-02T12:17:23.55314928Z caller=http.go:194 level=debug traceID=2d0a5c6f91580ee7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.582844ms" +ts=2024-05-02T12:17:23.552884481Z caller=http.go:194 level=debug traceID=4bea8b409cd99dd1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.666015ms" +ts=2024-05-02T12:17:23.552462316Z caller=http.go:194 level=debug traceID=5b11c7592576dc2d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.581192ms" +ts=2024-05-02T12:17:23.551647159Z caller=http.go:194 level=debug traceID=7d283ecc8fe0deb9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.511138ms" +ts=2024-05-02T12:17:23.551277337Z caller=http.go:194 level=debug traceID=4797d9a941209c2f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.371858ms" +ts=2024-05-02T12:17:23.551148333Z caller=http.go:194 level=debug traceID=6e2c9c7fe808d3a4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.346033ms" +ts=2024-05-02T12:17:23.551015769Z caller=http.go:194 level=debug traceID=44fe2614cb3d7864 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.718688ms" +ts=2024-05-02T12:17:23.550886298Z caller=http.go:194 level=debug traceID=73d1c26038ebe689 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.332282ms" +ts=2024-05-02T12:17:23.550024543Z caller=http.go:194 level=debug traceID=17baaa5a6e4f36aa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.773462ms" +ts=2024-05-02T12:17:23.54980808Z caller=http.go:194 level=debug 
traceID=4165499d19a7d823 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.601914ms" +ts=2024-05-02T12:17:23.549776558Z caller=http.go:194 level=debug traceID=471e28cbede89a3b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.414652ms" +ts=2024-05-02T12:17:23.549143038Z caller=http.go:194 level=debug traceID=5e7316c7419919a9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.482647ms" +ts=2024-05-02T12:17:23.549116077Z caller=http.go:194 level=debug traceID=5f9ad858a8fa0752 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.813622ms" +ts=2024-05-02T12:17:23.548646247Z caller=http.go:194 level=debug traceID=425b60155cb2cc69 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.748291ms" +ts=2024-05-02T12:17:23.548458661Z caller=http.go:194 level=debug traceID=69421d8676491c30 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.68014ms" +ts=2024-05-02T12:17:23.548302993Z caller=http.go:194 level=debug traceID=5b9a24d20cdf529a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.541307ms" +ts=2024-05-02T12:17:23.548137912Z caller=http.go:194 level=debug traceID=0c44fe2bafd61333 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.91059ms" +ts=2024-05-02T12:17:23.548148236Z caller=http.go:194 level=debug traceID=558781b19bed5189 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.080752ms" +ts=2024-05-02T12:17:23.547929942Z caller=http.go:194 level=debug traceID=3563025ae6e25623 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.618699ms" +ts=2024-05-02T12:17:23.547519181Z caller=http.go:194 level=debug traceID=3c2fd37e49c407e0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.483558ms" +ts=2024-05-02T12:17:23.547388356Z caller=http.go:194 level=debug traceID=0a25380f13252468 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.367887ms" +ts=2024-05-02T12:17:23.546325673Z caller=http.go:194 level=debug traceID=7f11ba96b3f73b47 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.534213ms" +ts=2024-05-02T12:17:23.545975941Z caller=http.go:194 level=debug traceID=5f362de652791758 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.454178ms" +ts=2024-05-02T12:17:23.545807675Z caller=http.go:194 level=debug traceID=679316b2b9a85b5d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.828161ms" +ts=2024-05-02T12:17:23.544870471Z caller=http.go:194 level=debug traceID=628d0546402e7ee9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.305194ms" +ts=2024-05-02T12:17:23.544418546Z caller=http.go:194 level=debug traceID=1c978ac03529fd4f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.038946ms" +ts=2024-05-02T12:17:23.543874042Z caller=http.go:194 level=debug traceID=0870bf719dbf0811 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.977978ms" +ts=2024-05-02T12:17:23.543009613Z caller=http.go:194 level=debug traceID=68e8112e616d7862 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.987097ms" +ts=2024-05-02T12:17:23.543108333Z caller=http.go:194 level=debug traceID=1ea4a4eaa9329170 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.662509ms" +ts=2024-05-02T12:17:23.542953491Z caller=http.go:194 level=debug traceID=0a14e6073e25b885 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.077174ms" +ts=2024-05-02T12:17:23.5428583Z caller=http.go:194 level=debug traceID=2d0a5c6f91580ee7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.393637ms" +ts=2024-05-02T12:17:23.542681147Z caller=http.go:194 level=debug traceID=4e93cafe6dfa5e47 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
1.303908ms" +ts=2024-05-02T12:17:23.542541982Z caller=http.go:194 level=debug traceID=4bea8b409cd99dd1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.429886ms" +ts=2024-05-02T12:17:23.541614838Z caller=http.go:194 level=debug traceID=160bf0f6e0772333 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 305.192µs" +ts=2024-05-02T12:17:23.540950286Z caller=http.go:194 level=debug traceID=205f51fbe29a6ac2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.240439ms" +ts=2024-05-02T12:17:23.540624309Z caller=http.go:194 level=debug traceID=5f9ad858a8fa0752 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.011831ms" +ts=2024-05-02T12:17:23.539989256Z caller=http.go:194 level=debug traceID=5b11c7592576dc2d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.254039ms" +ts=2024-05-02T12:17:23.539961617Z caller=http.go:194 level=debug traceID=299475f96f727c78 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.792051ms" +ts=2024-05-02T12:17:23.539589982Z caller=http.go:194 level=debug traceID=6e2c9c7fe808d3a4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.805087ms" +ts=2024-05-02T12:17:23.53941645Z caller=http.go:194 level=debug traceID=73d1c26038ebe689 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.618524ms" +ts=2024-05-02T12:17:23.538503367Z caller=http.go:194 level=debug traceID=17baaa5a6e4f36aa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.783252ms" +ts=2024-05-02T12:17:23.538082013Z caller=http.go:194 level=debug traceID=568effa0401a2c38 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.576128ms" +ts=2024-05-02T12:17:23.537220888Z caller=http.go:194 level=debug traceID=0c44fe2bafd61333 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.130187ms" +ts=2024-05-02T12:17:23.536875447Z caller=http.go:194 level=debug traceID=30b724b43eff0d83 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.921349ms" +ts=2024-05-02T12:17:23.53676096Z caller=http.go:194 level=debug traceID=69421d8676491c30 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.226051ms" +ts=2024-05-02T12:17:23.535944227Z caller=http.go:194 level=debug traceID=5b9a24d20cdf529a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.944556ms" +ts=2024-05-02T12:17:23.53593799Z caller=http.go:194 level=debug traceID=6f6dbb15825cf4a4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.918301ms" +ts=2024-05-02T12:17:23.535505777Z caller=http.go:194 level=debug traceID=628d0546402e7ee9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.751566ms" +ts=2024-05-02T12:17:23.535486737Z caller=http.go:194 level=debug traceID=679316b2b9a85b5d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.729685ms" +ts=2024-05-02T12:17:23.535336926Z caller=http.go:194 level=debug traceID=3c93bdfa8eedfc67 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.814736ms" +ts=2024-05-02T12:17:23.53457122Z caller=http.go:194 level=debug traceID=43db39cee58a9ce3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.373684ms" +ts=2024-05-02T12:17:23.534269394Z caller=http.go:194 level=debug traceID=182ed43bb0b9828a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.509753ms" +ts=2024-05-02T12:17:23.534297724Z caller=http.go:194 level=debug traceID=0721d7264ed3ea91 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.801878ms" +ts=2024-05-02T12:17:23.533254298Z caller=http.go:194 level=debug traceID=53598d72d465007a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.976138ms" +ts=2024-05-02T12:17:23.532906118Z caller=http.go:194 level=debug 
traceID=68b67148ef338dc5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.745855ms" +ts=2024-05-02T12:17:23.532183255Z caller=http.go:194 level=debug traceID=1c978ac03529fd4f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.845797ms" +ts=2024-05-02T12:17:23.532083057Z caller=http.go:194 level=debug traceID=223535ca6c494521 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.752685ms" +ts=2024-05-02T12:17:23.532118707Z caller=http.go:194 level=debug traceID=68e8112e616d7862 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.315358ms" +ts=2024-05-02T12:17:23.531929648Z caller=http.go:194 level=debug traceID=63d12a5a480aae3a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.226538ms" +ts=2024-05-02T12:17:23.531870563Z caller=http.go:194 level=debug traceID=4e93cafe6dfa5e47 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.849021ms" +ts=2024-05-02T12:17:23.531342067Z caller=http.go:194 level=debug traceID=160bf0f6e0772333 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 374.552µs" +ts=2024-05-02T12:17:23.531046582Z caller=http.go:194 level=debug traceID=3e758ae7f6c4a79d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.859551ms" +ts=2024-05-02T12:17:23.530785433Z caller=http.go:194 level=debug traceID=36c29dce46df98ca orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.53498ms" +ts=2024-05-02T12:17:23.530410899Z caller=http.go:194 level=debug traceID=0a14e6073e25b885 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.868858ms" +ts=2024-05-02T12:17:23.530150131Z caller=http.go:194 level=debug traceID=299475f96f727c78 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.216934ms" +ts=2024-05-02T12:17:23.529321861Z caller=http.go:194 level=debug traceID=647ea61c678203a9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 239.789µs" +ts=2024-05-02T12:17:23.529074409Z caller=http.go:194 level=debug traceID=24141b9643174304 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.314935ms" +ts=2024-05-02T12:17:23.528785356Z caller=http.go:194 level=debug traceID=4b8b331f22d8c9b4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.020876ms" +ts=2024-05-02T12:17:23.528651358Z caller=http.go:194 level=debug traceID=568effa0401a2c38 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.198627ms" +ts=2024-05-02T12:17:23.528523355Z caller=http.go:194 level=debug traceID=31d67fb839b93a74 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.34972ms" +ts=2024-05-02T12:17:23.528359229Z caller=http.go:194 level=debug traceID=1bff638862aabd8b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.336761ms" +ts=2024-05-02T12:17:23.528146427Z caller=http.go:194 level=debug traceID=64f7d7928016fce2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.468254ms" +ts=2024-05-02T12:17:23.528122046Z caller=http.go:194 level=debug traceID=40b9edcb25c87f6e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.924579ms" +ts=2024-05-02T12:17:23.527369733Z caller=http.go:194 level=debug traceID=71b6e1973c26b005 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.096691ms" +ts=2024-05-02T12:17:23.526607888Z caller=http.go:194 level=debug traceID=0721d7264ed3ea91 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.743878ms" +ts=2024-05-02T12:17:23.52631049Z caller=http.go:194 level=debug traceID=477100474ecea6f9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.078934ms" +ts=2024-05-02T12:17:23.526017576Z caller=http.go:194 level=debug traceID=4812f11088080164 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
1.396835ms" +ts=2024-05-02T12:17:23.525094797Z caller=http.go:194 level=debug traceID=30b724b43eff0d83 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.968397ms" +ts=2024-05-02T12:17:23.525081891Z caller=http.go:194 level=debug traceID=0e38bb4ebe2a2081 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.199883ms" +ts=2024-05-02T12:17:23.524619933Z caller=http.go:194 level=debug traceID=6f6dbb15825cf4a4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.373668ms" +ts=2024-05-02T12:17:23.524393736Z caller=http.go:194 level=debug traceID=3c93bdfa8eedfc67 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.987229ms" +ts=2024-05-02T12:17:23.523864549Z caller=http.go:194 level=debug traceID=43db39cee58a9ce3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.934985ms" +ts=2024-05-02T12:17:23.523126579Z caller=http.go:194 level=debug traceID=7801a7815a7d62c4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.914741ms" +ts=2024-05-02T12:17:23.52269671Z caller=http.go:194 level=debug traceID=3207ea88071065d3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.715522ms" +ts=2024-05-02T12:17:23.522438217Z caller=http.go:194 level=debug traceID=182ed43bb0b9828a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.359234ms" +ts=2024-05-02T12:17:23.522533073Z caller=http.go:194 level=debug traceID=3f9057b7d108089b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.298902ms" +ts=2024-05-02T12:17:23.522210939Z caller=http.go:194 level=debug traceID=53598d72d465007a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.985753ms" +ts=2024-05-02T12:17:23.521456035Z caller=http.go:194 level=debug traceID=68b67148ef338dc5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.676567ms" +ts=2024-05-02T12:17:23.521216984Z caller=http.go:194 level=debug traceID=3e758ae7f6c4a79d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.022827ms" +ts=2024-05-02T12:17:23.521120273Z caller=http.go:194 level=debug traceID=63d12a5a480aae3a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.553569ms" +ts=2024-05-02T12:17:23.520605641Z caller=http.go:194 level=debug traceID=69026e1cf3ea66d1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.002056ms" +ts=2024-05-02T12:17:23.520397146Z caller=http.go:194 level=debug traceID=223535ca6c494521 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.601406ms" +ts=2024-05-02T12:17:23.519000127Z caller=http.go:194 level=debug traceID=43f8b7aa53856ec6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.26138ms" +ts=2024-05-02T12:17:23.518404038Z caller=http.go:194 level=debug traceID=647ea61c678203a9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 373.296µs" +ts=2024-05-02T12:17:23.518154192Z caller=http.go:194 level=debug traceID=24141b9643174304 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.665688ms" +ts=2024-05-02T12:17:23.518156718Z caller=http.go:194 level=debug traceID=4f88c19cbea0a757 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 41.039485ms" +ts=2024-05-02T12:17:23.517860687Z caller=http.go:194 level=debug traceID=7e9c7ccd7ebd79d5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.404536ms" +ts=2024-05-02T12:17:23.517692818Z caller=http.go:194 level=debug traceID=7ba842854243709f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.408442ms" +ts=2024-05-02T12:17:23.517582158Z caller=http.go:194 level=debug traceID=36c29dce46df98ca orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.770361ms" +ts=2024-05-02T12:17:23.517406001Z caller=http.go:194 level=debug 
traceID=4b8b331f22d8c9b4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.122999ms" +ts=2024-05-02T12:17:23.517195587Z caller=http.go:194 level=debug traceID=40608f329dca9a65 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.963654ms" +ts=2024-05-02T12:17:23.516889499Z caller=http.go:194 level=debug traceID=31d67fb839b93a74 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.141958ms" +ts=2024-05-02T12:17:23.516851293Z caller=http.go:194 level=debug traceID=181bf4a9452c1f22 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.21039ms" +ts=2024-05-02T12:17:23.516658741Z caller=http.go:194 level=debug traceID=1bff638862aabd8b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.931384ms" +ts=2024-05-02T12:17:23.516631683Z caller=http.go:194 level=debug traceID=71b6e1973c26b005 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.819333ms" +ts=2024-05-02T12:17:23.516133845Z caller=http.go:194 level=debug traceID=40b9edcb25c87f6e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.53506ms" +ts=2024-05-02T12:17:23.515655703Z caller=http.go:194 level=debug traceID=477100474ecea6f9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.500604ms" +ts=2024-05-02T12:17:23.515463922Z caller=http.go:194 level=debug traceID=570e7a20f1d191b7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.022869ms" +ts=2024-05-02T12:17:23.515353524Z caller=http.go:194 level=debug traceID=4812f11088080164 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.83116ms" +ts=2024-05-02T12:17:23.514355138Z caller=http.go:194 level=debug traceID=0e38bb4ebe2a2081 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.561696ms" +ts=2024-05-02T12:17:23.513979447Z caller=http.go:194 level=debug traceID=64f7d7928016fce2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.641987ms" +ts=2024-05-02T12:17:23.513355463Z caller=http.go:194 level=debug traceID=7801a7815a7d62c4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.071671ms" +ts=2024-05-02T12:17:23.512842979Z caller=http.go:194 level=debug traceID=6a6ee8601bec6ab5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.553621ms" +ts=2024-05-02T12:17:23.512607103Z caller=http.go:194 level=debug traceID=67d3613c6cc42704 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.82274ms" +ts=2024-05-02T12:17:23.512001261Z caller=http.go:194 level=debug traceID=5234893a5eeb9705 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.523834ms" +ts=2024-05-02T12:17:23.51199063Z caller=http.go:194 level=debug traceID=3f9057b7d108089b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.062039ms" +ts=2024-05-02T12:17:23.511243853Z caller=http.go:194 level=debug traceID=08bc5adb944b0e7e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.509831ms" +ts=2024-05-02T12:17:23.510006498Z caller=http.go:194 level=debug traceID=000bdf4c14b52be7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.066194ms" +ts=2024-05-02T12:17:23.509589403Z caller=http.go:194 level=debug traceID=69026e1cf3ea66d1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.787689ms" +ts=2024-05-02T12:17:23.509090371Z caller=http.go:194 level=debug traceID=3207ea88071065d3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.857608ms" +ts=2024-05-02T12:17:23.508088735Z caller=http.go:194 level=debug traceID=43f8b7aa53856ec6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.675453ms" +ts=2024-05-02T12:17:23.508087145Z caller=http.go:194 level=debug traceID=1eeb87a669aebe07 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
1.476482ms" +ts=2024-05-02T12:17:23.507634161Z caller=http.go:194 level=debug traceID=7e9c7ccd7ebd79d5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.153036ms" +ts=2024-05-02T12:17:23.507617738Z caller=http.go:194 level=debug traceID=572e1d12197fb400 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.765024ms" +ts=2024-05-02T12:17:23.507660696Z caller=http.go:194 level=debug traceID=65dcb34b4e5b7f74 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.351973ms" +ts=2024-05-02T12:17:23.507313654Z caller=http.go:194 level=debug traceID=27c3f3423d3ea959 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 104.150555ms" +ts=2024-05-02T12:17:23.506864107Z caller=http.go:194 level=debug traceID=10d060d8727cbe44 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.745015ms" +ts=2024-05-02T12:17:23.506627137Z caller=http.go:194 level=debug traceID=63d8846c7dcc0023 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 16.098365ms" +ts=2024-05-02T12:17:23.506525245Z caller=http.go:194 level=debug traceID=7ba842854243709f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.931358ms" +ts=2024-05-02T12:17:23.506098218Z caller=http.go:194 level=debug traceID=40608f329dca9a65 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.151298ms" +ts=2024-05-02T12:17:23.506013385Z caller=http.go:194 level=debug traceID=181bf4a9452c1f22 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.431359ms" +ts=2024-05-02T12:17:23.505579265Z caller=http.go:194 level=debug traceID=570e7a20f1d191b7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.331493ms" +ts=2024-05-02T12:17:23.503053209Z caller=http.go:194 level=debug traceID=59b42c858ad5ef40 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.052074ms" +ts=2024-05-02T12:17:23.502887609Z caller=http.go:194 level=debug traceID=3c46f65bc14b4056 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.788643ms" +ts=2024-05-02T12:17:23.502046485Z caller=http.go:194 level=debug traceID=68786058084a6d13 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 18.789273ms" +ts=2024-05-02T12:17:23.502122906Z caller=http.go:194 level=debug traceID=67d3613c6cc42704 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.530095ms" +ts=2024-05-02T12:17:23.501686802Z caller=http.go:194 level=debug traceID=4a7069adf7d970a6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.791628ms" +ts=2024-05-02T12:17:23.501453485Z caller=http.go:194 level=debug traceID=6a6ee8601bec6ab5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.809626ms" +ts=2024-05-02T12:17:23.500788464Z caller=http.go:194 level=debug traceID=2e32c7d9a512dbe3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.482919ms" +ts=2024-05-02T12:17:23.499730362Z caller=http.go:194 level=debug traceID=5234893a5eeb9705 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.175522ms" +ts=2024-05-02T12:17:23.499611956Z caller=http.go:194 level=debug traceID=000bdf4c14b52be7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.737781ms" +ts=2024-05-02T12:17:23.499551677Z caller=http.go:194 level=debug traceID=2b79ab6dadde0b6c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.653701ms" +ts=2024-05-02T12:17:23.499516715Z caller=http.go:194 level=debug traceID=23aba6cdbfadd485 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.100576ms" +ts=2024-05-02T12:17:23.499456352Z caller=http.go:194 level=debug traceID=08bc5adb944b0e7e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.21084ms" +ts=2024-05-02T12:17:23.499357934Z caller=http.go:194 
level=debug traceID=0a50a8f287971b78 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.244282ms" +ts=2024-05-02T12:17:23.498660718Z caller=http.go:194 level=debug traceID=34a2e862f272e69c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.383222ms" +ts=2024-05-02T12:17:23.498331422Z caller=http.go:194 level=debug traceID=1eeb87a669aebe07 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.088208ms" +ts=2024-05-02T12:17:23.49798793Z caller=http.go:194 level=debug traceID=63d8846c7dcc0023 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 19.07602ms" +ts=2024-05-02T12:17:23.497937376Z caller=http.go:194 level=debug traceID=5ff6359cd076da92 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.378145ms" +ts=2024-05-02T12:17:23.497487087Z caller=http.go:194 level=debug traceID=65dcb34b4e5b7f74 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.29083ms" +ts=2024-05-02T12:17:23.496840884Z caller=http.go:194 level=debug traceID=5b14c2e99c84183b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.255757ms" +ts=2024-05-02T12:17:23.496510386Z caller=http.go:194 level=debug traceID=10d060d8727cbe44 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.51777ms" +ts=2024-05-02T12:17:23.495856357Z caller=http.go:194 level=debug traceID=79e3f03f3955fe1c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.403502ms" +ts=2024-05-02T12:17:23.494911505Z caller=http.go:194 level=debug traceID=5d4d8d118cb3ec32 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.319951ms" +ts=2024-05-02T12:17:23.494874931Z caller=http.go:194 level=debug traceID=572e1d12197fb400 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.992097ms" +ts=2024-05-02T12:17:23.49390817Z caller=http.go:194 level=debug traceID=1806e7bc5aa0e99a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.642878ms" +ts=2024-05-02T12:17:23.493764194Z caller=http.go:194 level=debug traceID=21cb9fae21cd9730 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.341112ms" +ts=2024-05-02T12:17:23.493598372Z caller=http.go:194 level=debug traceID=5d192bfec77aa42d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.484254ms" +ts=2024-05-02T12:17:23.491997148Z caller=http.go:194 level=debug traceID=3c46f65bc14b4056 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.319114ms" +ts=2024-05-02T12:17:23.491615741Z caller=http.go:194 level=debug traceID=3423e9e29111398c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.908496ms" +ts=2024-05-02T12:17:23.491452414Z caller=http.go:194 level=debug traceID=59b42c858ad5ef40 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.298476ms" +ts=2024-05-02T12:17:23.491394498Z caller=http.go:194 level=debug traceID=4a7069adf7d970a6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.729695ms" +ts=2024-05-02T12:17:23.489959931Z caller=http.go:194 level=debug traceID=0a50a8f287971b78 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.77258ms" +ts=2024-05-02T12:17:23.489927257Z caller=http.go:194 level=debug traceID=23aba6cdbfadd485 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.017003ms" +ts=2024-05-02T12:17:23.489067204Z caller=http.go:194 level=debug traceID=2e32c7d9a512dbe3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.026141ms" +ts=2024-05-02T12:17:23.488795339Z caller=http.go:194 level=debug traceID=2b79ab6dadde0b6c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.636789ms" +ts=2024-05-02T12:17:23.488595772Z caller=http.go:194 level=debug traceID=34a2e862f272e69c orgID=1218 msg="POST 
/push.v1.PusherService/Push (200) 2.526339ms" +ts=2024-05-02T12:17:23.487262843Z caller=http.go:194 level=debug traceID=0a5ebadf69c3e67a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.98685ms" +ts=2024-05-02T12:17:23.486642108Z caller=http.go:194 level=debug traceID=5ff6359cd076da92 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.363263ms" +ts=2024-05-02T12:17:23.486406948Z caller=http.go:194 level=debug traceID=68786058084a6d13 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.106161ms" +ts=2024-05-02T12:17:23.486282045Z caller=http.go:194 level=debug traceID=037446c5ec547182 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.626758ms" +ts=2024-05-02T12:17:23.485713006Z caller=http.go:194 level=debug traceID=791d97694af591e6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.914414ms" +ts=2024-05-02T12:17:23.485638204Z caller=http.go:194 level=debug traceID=7aa56564dddf04bc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.267757ms" +ts=2024-05-02T12:17:23.484539591Z caller=http.go:194 level=debug traceID=5b14c2e99c84183b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.949015ms" +ts=2024-05-02T12:17:23.48453594Z caller=http.go:194 level=debug traceID=18387f2798942bd0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.193156ms" +ts=2024-05-02T12:17:23.484360843Z caller=http.go:194 level=debug traceID=0954bf3db739c1f8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.922272ms" +ts=2024-05-02T12:17:23.483767219Z caller=http.go:194 level=debug traceID=4619be47957b637a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.094584ms" +ts=2024-05-02T12:17:23.483432433Z caller=http.go:194 level=debug traceID=79e3f03f3955fe1c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.779298ms" +ts=2024-05-02T12:17:23.483183771Z caller=http.go:194 level=debug traceID=4f88c19cbea0a757 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 18.116872ms" +ts=2024-05-02T12:17:23.482906999Z caller=http.go:194 level=debug traceID=5d192bfec77aa42d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.832656ms" +ts=2024-05-02T12:17:23.482743976Z caller=http.go:194 level=debug traceID=1806e7bc5aa0e99a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.851917ms" +ts=2024-05-02T12:17:23.482533515Z caller=http.go:194 level=debug traceID=21cb9fae21cd9730 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.898648ms" +ts=2024-05-02T12:17:23.481911167Z caller=http.go:194 level=debug traceID=5d0ca8052a53169f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.62382ms" +ts=2024-05-02T12:17:23.479862675Z caller=http.go:194 level=debug traceID=3423e9e29111398c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.405757ms" +ts=2024-05-02T12:17:23.479599963Z caller=http.go:194 level=debug traceID=691a2e0686f90fe0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.579397ms" +ts=2024-05-02T12:17:23.478639943Z caller=http.go:194 level=debug traceID=5d4d8d118cb3ec32 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.93829ms" +ts=2024-05-02T12:17:23.478416891Z caller=http.go:194 level=debug traceID=7bfd4a7868d9f263 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.40637ms" +ts=2024-05-02T12:17:23.478103245Z caller=http.go:194 level=debug traceID=1fb6a9560606dd7c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.163048ms" +ts=2024-05-02T12:17:23.477861142Z caller=http.go:194 level=debug traceID=592d4ce218ed885b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.073883ms" +ts=2024-05-02T12:17:23.475981308Z 
caller=http.go:194 level=debug traceID=18387f2798942bd0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.342818ms" +ts=2024-05-02T12:17:23.475809276Z caller=http.go:194 level=debug traceID=0a5ebadf69c3e67a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.720251ms" +ts=2024-05-02T12:17:23.475534423Z caller=http.go:194 level=debug traceID=791d97694af591e6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.928406ms" +ts=2024-05-02T12:17:23.474818925Z caller=http.go:194 level=debug traceID=037446c5ec547182 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.789715ms" +ts=2024-05-02T12:17:23.474376075Z caller=http.go:194 level=debug traceID=77f763a13074d312 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.102116ms" +ts=2024-05-02T12:17:23.474402497Z caller=http.go:194 level=debug traceID=7aa56564dddf04bc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.19692ms" +ts=2024-05-02T12:17:23.47396355Z caller=http.go:194 level=debug traceID=4619be47957b637a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.384484ms" +ts=2024-05-02T12:17:23.47401627Z caller=http.go:194 level=debug traceID=6cd6b590cab77e01 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.632239ms" +ts=2024-05-02T12:17:23.473548198Z caller=http.go:194 level=debug traceID=3d3ceba3c3721fb9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.946993ms" +ts=2024-05-02T12:17:23.47332155Z caller=http.go:194 level=debug traceID=55d707ea3ec09552 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 295.906µs" +ts=2024-05-02T12:17:23.472766178Z caller=http.go:194 level=debug traceID=0954bf3db739c1f8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.674184ms" +ts=2024-05-02T12:17:23.472150907Z caller=http.go:194 level=debug traceID=24b0aa5256dc561c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.899346ms" +ts=2024-05-02T12:17:23.47138443Z caller=http.go:194 level=debug traceID=5d0ca8052a53169f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.276378ms" +ts=2024-05-02T12:17:23.470349207Z caller=http.go:194 level=debug traceID=020331b4888c64e3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.613688ms" +ts=2024-05-02T12:17:23.47013983Z caller=http.go:194 level=debug traceID=2aca71ffadca72c5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.385503ms" +ts=2024-05-02T12:17:23.469260118Z caller=http.go:194 level=debug traceID=1071e78f9b1453f0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.51097ms" +ts=2024-05-02T12:17:23.468696094Z caller=http.go:194 level=debug traceID=7bfd4a7868d9f263 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.662155ms" +ts=2024-05-02T12:17:23.467199122Z caller=http.go:194 level=debug traceID=5115576184d8240c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.447439ms" +ts=2024-05-02T12:17:23.467104354Z caller=http.go:194 level=debug traceID=691a2e0686f90fe0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.537563ms" +ts=2024-05-02T12:17:23.466880635Z caller=http.go:194 level=debug traceID=1fb6a9560606dd7c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.161812ms" +ts=2024-05-02T12:17:23.466614976Z caller=http.go:194 level=debug traceID=592d4ce218ed885b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.893187ms" +ts=2024-05-02T12:17:23.464830326Z caller=http.go:194 level=debug traceID=6b044397350916ed orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.93936ms" +ts=2024-05-02T12:17:23.464589953Z caller=http.go:194 level=debug traceID=1b14be3ecd1ae9f9 orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 2.771324ms" +ts=2024-05-02T12:17:23.464146643Z caller=http.go:194 level=debug traceID=332513d85414b3a0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.327693ms" +ts=2024-05-02T12:17:23.463923746Z caller=http.go:194 level=debug traceID=6cd6b590cab77e01 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.002317ms" +ts=2024-05-02T12:17:23.46383887Z caller=http.go:194 level=debug traceID=6ea86ad14956d5b0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.441107ms" +ts=2024-05-02T12:17:23.463317609Z caller=http.go:194 level=debug traceID=7f6cd55a6f5f5a50 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.084154ms" +ts=2024-05-02T12:17:23.463146494Z caller=http.go:194 level=debug traceID=3d3ceba3c3721fb9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.663018ms" +ts=2024-05-02T12:17:23.462378296Z caller=http.go:194 level=debug traceID=77f763a13074d312 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.692884ms" +ts=2024-05-02T12:17:23.462205515Z caller=http.go:194 level=debug traceID=55d707ea3ec09552 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 331.624µs" +ts=2024-05-02T12:17:23.461037532Z caller=http.go:194 level=debug traceID=24b0aa5256dc561c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.964985ms" +ts=2024-05-02T12:17:23.460358701Z caller=http.go:194 level=debug traceID=68cd382e174afefb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.27771ms" +ts=2024-05-02T12:17:23.460199155Z caller=http.go:194 level=debug traceID=0ccd8eb1e31a24af orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.893809ms" +ts=2024-05-02T12:17:23.459509276Z caller=http.go:194 level=debug traceID=1071e78f9b1453f0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.7996ms" +ts=2024-05-02T12:17:23.458916078Z caller=http.go:194 level=debug traceID=2aca71ffadca72c5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.435331ms" +ts=2024-05-02T12:17:23.458305851Z caller=http.go:194 level=debug traceID=020331b4888c64e3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.476953ms" +ts=2024-05-02T12:17:23.458195334Z caller=http.go:194 level=debug traceID=275c7c9b68a9080a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.207655ms" +ts=2024-05-02T12:17:23.457896979Z caller=http.go:194 level=debug traceID=0f9f60e69c747de7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.642722ms" +ts=2024-05-02T12:17:23.456303398Z caller=http.go:194 level=debug traceID=5a0dbe66741c2749 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.464633ms" +ts=2024-05-02T12:17:23.456068274Z caller=http.go:194 level=debug traceID=06e23f9b15026507 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.768277ms" +ts=2024-05-02T12:17:23.455591221Z caller=http.go:194 level=debug traceID=317e08b29603a841 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.172159ms" +ts=2024-05-02T12:17:23.455510608Z caller=http.go:194 level=debug traceID=4a590c0f91cf9202 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.937783ms" +ts=2024-05-02T12:17:23.455099464Z caller=http.go:194 level=debug traceID=5115576184d8240c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.025647ms" +ts=2024-05-02T12:17:23.453660523Z caller=http.go:194 level=debug traceID=6b044397350916ed orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.863511ms" +ts=2024-05-02T12:17:23.45366575Z caller=http.go:194 level=debug traceID=32dbb3b2e2a46cdf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.915658ms" +ts=2024-05-02T12:17:23.453290374Z 
caller=http.go:194 level=debug traceID=6ea86ad14956d5b0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.877181ms" +ts=2024-05-02T12:17:23.453294431Z caller=http.go:194 level=debug traceID=332513d85414b3a0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.364137ms" +ts=2024-05-02T12:17:23.453207405Z caller=http.go:194 level=debug traceID=1b14be3ecd1ae9f9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.599538ms" +ts=2024-05-02T12:17:23.452125144Z caller=http.go:194 level=debug traceID=7f6cd55a6f5f5a50 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.176802ms" +ts=2024-05-02T12:17:23.451008891Z caller=http.go:194 level=debug traceID=7a6a0613eb228f2c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.014198ms" +ts=2024-05-02T12:17:23.45089232Z caller=http.go:194 level=debug traceID=4c8560854c2e8d4c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.269601ms" +ts=2024-05-02T12:17:23.450713489Z caller=http.go:194 level=debug traceID=494f249e0530f04a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.651003ms" +ts=2024-05-02T12:17:23.450690233Z caller=http.go:194 level=debug traceID=65dff31841b60e38 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.69861ms" +ts=2024-05-02T12:17:23.450223944Z caller=http.go:194 level=debug traceID=7b9f7f66bdda56c3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.18644ms" +ts=2024-05-02T12:17:23.44909475Z caller=http.go:194 level=debug traceID=68cd382e174afefb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.366029ms" +ts=2024-05-02T12:17:23.448185716Z caller=http.go:194 level=debug traceID=71c85393b6792463 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.383066ms" +ts=2024-05-02T12:17:23.446997378Z caller=http.go:194 level=debug traceID=275c7c9b68a9080a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.155387ms" +ts=2024-05-02T12:17:23.446050958Z caller=http.go:194 level=debug traceID=4a590c0f91cf9202 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.459061ms" +ts=2024-05-02T12:17:23.446058665Z caller=http.go:194 level=debug traceID=06e23f9b15026507 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.282388ms" +ts=2024-05-02T12:17:23.445836437Z caller=http.go:194 level=debug traceID=0f9f60e69c747de7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.097278ms" +ts=2024-05-02T12:17:23.445740527Z caller=http.go:194 level=debug traceID=2f04a6306ef101b4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.724121ms" +ts=2024-05-02T12:17:23.445299822Z caller=http.go:194 level=debug traceID=5a0dbe66741c2749 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 16.278123ms" +ts=2024-05-02T12:17:23.444612726Z caller=http.go:194 level=debug traceID=2e787f2a02e48c84 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.708356ms" +ts=2024-05-02T12:17:23.444126572Z caller=http.go:194 level=debug traceID=0ccd8eb1e31a24af orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.987597ms" +ts=2024-05-02T12:17:23.443629182Z caller=http.go:194 level=debug traceID=317e08b29603a841 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.928481ms" +ts=2024-05-02T12:17:23.443535085Z caller=http.go:194 level=debug traceID=32dbb3b2e2a46cdf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.562955ms" +ts=2024-05-02T12:17:23.442731478Z caller=http.go:194 level=debug traceID=17b173c7c7c12d67 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.964477ms" +ts=2024-05-02T12:17:23.440648359Z caller=http.go:194 level=debug traceID=67f1cd87edc451fa orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 4.067757ms" +ts=2024-05-02T12:17:23.440348358Z caller=http.go:194 level=debug traceID=6fb54e6889a62a34 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.888807ms" +ts=2024-05-02T12:17:23.440129446Z caller=http.go:194 level=debug traceID=5d004b5964aa32f7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.329192ms" +ts=2024-05-02T12:17:23.440083033Z caller=http.go:194 level=debug traceID=094587852fabe289 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.83254ms" +ts=2024-05-02T12:17:23.43930267Z caller=http.go:194 level=debug traceID=5425e12a22a9df44 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.285265ms" +ts=2024-05-02T12:17:23.438560019Z caller=http.go:194 level=debug traceID=2926c77b180b83d7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.690038ms" +ts=2024-05-02T12:17:23.437724497Z caller=http.go:194 level=debug traceID=65dff31841b60e38 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.256318ms" +ts=2024-05-02T12:17:23.437677291Z caller=http.go:194 level=debug traceID=494f249e0530f04a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.207776ms" +ts=2024-05-02T12:17:23.437403967Z caller=http.go:194 level=debug traceID=7b9f7f66bdda56c3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.066189ms" +ts=2024-05-02T12:17:23.437090775Z caller=http.go:194 level=debug traceID=71c85393b6792463 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.414166ms" +ts=2024-05-02T12:17:23.436400285Z caller=http.go:194 level=debug traceID=4c8560854c2e8d4c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.846596ms" +ts=2024-05-02T12:17:23.436118583Z caller=http.go:194 level=debug traceID=7a6a0613eb228f2c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.500158ms" +ts=2024-05-02T12:17:23.434029686Z caller=http.go:194 level=debug traceID=2f04a6306ef101b4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.760782ms" +ts=2024-05-02T12:17:23.432880351Z caller=http.go:194 level=debug traceID=2e787f2a02e48c84 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.23123ms" +ts=2024-05-02T12:17:23.43241789Z caller=http.go:194 level=debug traceID=7ef17301a307f0d8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.424925ms" +ts=2024-05-02T12:17:23.432402153Z caller=http.go:194 level=debug traceID=17b173c7c7c12d67 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.810488ms" +ts=2024-05-02T12:17:23.432362364Z caller=http.go:194 level=debug traceID=22ead0efedcb2fbb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.525408ms" +ts=2024-05-02T12:17:23.43033615Z caller=http.go:194 level=debug traceID=67f1cd87edc451fa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.26835ms" +ts=2024-05-02T12:17:23.429935475Z caller=http.go:194 level=debug traceID=46cafb09642e4b42 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.843776ms" +ts=2024-05-02T12:17:23.429371241Z caller=http.go:194 level=debug traceID=44643390fed7bf7e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.156449ms" +ts=2024-05-02T12:17:23.429045184Z caller=http.go:194 level=debug traceID=27c3f3423d3ea959 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 40.50907ms" +ts=2024-05-02T12:17:23.428928466Z caller=http.go:194 level=debug traceID=2926c77b180b83d7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.998881ms" +ts=2024-05-02T12:17:23.428965038Z caller=http.go:194 level=debug traceID=6fb54e6889a62a34 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.909883ms" +ts=2024-05-02T12:17:23.428946446Z 
caller=http.go:194 level=debug traceID=394a260cadfcc108 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.053528ms" +ts=2024-05-02T12:17:23.428579634Z caller=http.go:194 level=debug traceID=463692a324ca3422 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 15.729516ms" +ts=2024-05-02T12:17:23.428317794Z caller=http.go:194 level=debug traceID=7b185d1cd23de1b6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.966416ms" +ts=2024-05-02T12:17:23.427169043Z caller=http.go:194 level=debug traceID=5425e12a22a9df44 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.721479ms" +ts=2024-05-02T12:17:23.426942454Z caller=http.go:194 level=debug traceID=094587852fabe289 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.42337ms" +ts=2024-05-02T12:17:23.42637436Z caller=http.go:194 level=debug traceID=449f09ad40e0a35c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.676427ms" +ts=2024-05-02T12:17:23.425653117Z caller=http.go:194 level=debug traceID=3cc7e45d060226c5 orgID=3648 msg="POST /push.v1.PusherService/Push (400) 378.053µs" +ts=2024-05-02T12:17:23.425235456Z caller=http.go:194 level=debug traceID=5d004b5964aa32f7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.771732ms" +ts=2024-05-02T12:17:23.424260911Z caller=http.go:194 level=debug traceID=4d810b6b4651cceb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.63162ms" +ts=2024-05-02T12:17:23.423884607Z caller=http.go:194 level=debug traceID=125d4b9c5abc6f2f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.495148ms" +ts=2024-05-02T12:17:23.42194413Z caller=http.go:194 level=debug traceID=7ef17301a307f0d8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.995261ms" +ts=2024-05-02T12:17:23.421690533Z caller=http.go:194 level=debug traceID=56047620b1376eb5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.2044ms" +ts=2024-05-02T12:17:23.421041285Z caller=http.go:194 level=debug traceID=14b8d71e508bbea0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.201749ms" +ts=2024-05-02T12:17:23.420625669Z caller=http.go:194 level=debug traceID=22ead0efedcb2fbb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.615981ms" +ts=2024-05-02T12:17:23.41979236Z caller=http.go:194 level=debug traceID=394a260cadfcc108 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.930753ms" +ts=2024-05-02T12:17:23.419585375Z caller=http.go:194 level=debug traceID=44643390fed7bf7e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.658493ms" +ts=2024-05-02T12:17:23.418310606Z caller=http.go:194 level=debug traceID=46cafb09642e4b42 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.601618ms" +ts=2024-05-02T12:17:23.417698778Z caller=http.go:194 level=debug traceID=51e51a31b0c17985 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.611018ms" +ts=2024-05-02T12:17:23.417608245Z caller=http.go:194 level=debug traceID=7b185d1cd23de1b6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.50635ms" +ts=2024-05-02T12:17:23.416256622Z caller=http.go:194 level=debug traceID=6429c3b87dce6315 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.413659ms" +ts=2024-05-02T12:17:23.416247332Z caller=http.go:194 level=debug traceID=6361d72c1f97d279 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.096643ms" +ts=2024-05-02T12:17:23.415689336Z caller=http.go:194 level=debug traceID=6ef7492795388d2b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.800165ms" +ts=2024-05-02T12:17:23.415474915Z caller=http.go:194 level=debug traceID=463692a324ca3422 orgID=1218 msg="POST 
/push.v1.PusherService/Push (200) 14.031182ms" +ts=2024-05-02T12:17:23.415099144Z caller=http.go:194 level=debug traceID=449f09ad40e0a35c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.12158ms" +ts=2024-05-02T12:17:23.414676628Z caller=http.go:194 level=debug traceID=6f7ea764509b081b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.380879ms" +ts=2024-05-02T12:17:23.414583643Z caller=http.go:194 level=debug traceID=3cc7e45d060226c5 orgID=1218 msg="POST /push.v1.PusherService/Push (400) 391.909µs" +ts=2024-05-02T12:17:23.413347446Z caller=http.go:194 level=debug traceID=4d810b6b4651cceb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.813522ms" +ts=2024-05-02T12:17:23.41310645Z caller=http.go:194 level=debug traceID=0d15c321a6f95ea3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.847038ms" +ts=2024-05-02T12:17:23.412690408Z caller=http.go:194 level=debug traceID=578d872efd4d230f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.799443ms" +ts=2024-05-02T12:17:23.411949033Z caller=http.go:194 level=debug traceID=300f1a23992576c6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.771142ms" +ts=2024-05-02T12:17:23.411706211Z caller=http.go:194 level=debug traceID=125d4b9c5abc6f2f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.413897ms" +ts=2024-05-02T12:17:23.410600327Z caller=http.go:194 level=debug traceID=56047620b1376eb5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.234004ms" +ts=2024-05-02T12:17:23.40896116Z caller=http.go:194 level=debug traceID=3aac370e35ea0882 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.863582ms" +ts=2024-05-02T12:17:23.408660439Z caller=http.go:194 level=debug traceID=14b8d71e508bbea0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.635897ms" +ts=2024-05-02T12:17:23.408657628Z caller=http.go:194 level=debug traceID=7019d2de7b1a6b18 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.717919ms" +ts=2024-05-02T12:17:23.407691212Z caller=http.go:194 level=debug traceID=51e51a31b0c17985 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.035836ms" +ts=2024-05-02T12:17:23.407169241Z caller=http.go:194 level=debug traceID=63d1eb0070632ea0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.697367ms" +ts=2024-05-02T12:17:23.406652692Z caller=http.go:194 level=debug traceID=6361d72c1f97d279 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.400617ms" +ts=2024-05-02T12:17:23.406170947Z caller=http.go:194 level=debug traceID=38fead0920af1226 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.51138ms" +ts=2024-05-02T12:17:23.40613257Z caller=http.go:194 level=debug traceID=6429c3b87dce6315 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.856677ms" +ts=2024-05-02T12:17:23.405384305Z caller=http.go:194 level=debug traceID=5ab7e2e46be90750 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 15.464919ms" +ts=2024-05-02T12:17:23.405197158Z caller=http.go:194 level=debug traceID=6ef7492795388d2b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.386068ms" +ts=2024-05-02T12:17:23.40502123Z caller=http.go:194 level=debug traceID=2baa2296fa38d33d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.095938ms" +ts=2024-05-02T12:17:23.404958857Z caller=http.go:194 level=debug traceID=262b0c0c65f3eb68 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.105252ms" +ts=2024-05-02T12:17:23.404213002Z caller=http.go:194 level=debug traceID=6f7ea764509b081b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.176311ms" +ts=2024-05-02T12:17:23.403337904Z 
caller=http.go:194 level=debug traceID=0d15c321a6f95ea3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.463875ms" +ts=2024-05-02T12:17:23.402872885Z caller=http.go:194 level=debug traceID=578d872efd4d230f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.813151ms" +ts=2024-05-02T12:17:23.400904108Z caller=http.go:194 level=debug traceID=300f1a23992576c6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.842745ms" +ts=2024-05-02T12:17:23.400824337Z caller=http.go:194 level=debug traceID=7019d2de7b1a6b18 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.900208ms" +ts=2024-05-02T12:17:23.399073676Z caller=http.go:194 level=debug traceID=001137c7c2767471 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.970442ms" +ts=2024-05-02T12:17:23.396926237Z caller=http.go:194 level=debug traceID=4f7b70c418221eb9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.782486ms" +ts=2024-05-02T12:17:23.397034138Z caller=http.go:194 level=debug traceID=1b264ae24b146f90 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.587088ms" +ts=2024-05-02T12:17:23.396657547Z caller=http.go:194 level=debug traceID=575b4d6965874394 orgID=3648 msg="POST /push.v1.PusherService/Push (400) 136.042µs" +ts=2024-05-02T12:17:23.396399261Z caller=http.go:194 level=debug traceID=67d9609d6036aa01 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.0028ms" +ts=2024-05-02T12:17:23.396314491Z caller=http.go:194 level=debug traceID=0d3021eb806b022a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.008468ms" +ts=2024-05-02T12:17:23.39527928Z caller=http.go:194 level=debug traceID=262b0c0c65f3eb68 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.137487ms" +ts=2024-05-02T12:17:23.395089879Z caller=http.go:194 level=debug traceID=3aac370e35ea0882 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.225431ms" +ts=2024-05-02T12:17:23.395066235Z caller=http.go:194 level=debug traceID=38fead0920af1226 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.520916ms" +ts=2024-05-02T12:17:23.394688999Z caller=http.go:194 level=debug traceID=34d7abc25123d01e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.79503ms" +ts=2024-05-02T12:17:23.394116093Z caller=http.go:194 level=debug traceID=63d1eb0070632ea0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.266594ms" +ts=2024-05-02T12:17:23.393786085Z caller=http.go:194 level=debug traceID=420d3af4edc2a61a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.523052ms" +ts=2024-05-02T12:17:23.391309449Z caller=http.go:194 level=debug traceID=2baa2296fa38d33d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 19.271836ms" +ts=2024-05-02T12:17:23.391257709Z caller=http.go:194 level=debug traceID=45f9c0069dcc8281 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.919247ms" +ts=2024-05-02T12:17:23.390209303Z caller=http.go:194 level=debug traceID=5ab7e2e46be90750 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.022334ms" +ts=2024-05-02T12:17:23.389817716Z caller=http.go:194 level=debug traceID=409fc5be1d6713a2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.106089ms" +ts=2024-05-02T12:17:23.387348659Z caller=http.go:194 level=debug traceID=001137c7c2767471 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.345581ms" +ts=2024-05-02T12:17:23.386783765Z caller=http.go:194 level=debug traceID=6122275589515357 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.974477ms" +ts=2024-05-02T12:17:23.386456868Z caller=http.go:194 level=debug traceID=4f7b70c418221eb9 orgID=1218 msg="POST 
/push.v1.PusherService/Push (200) 1.507598ms" +ts=2024-05-02T12:17:23.386153682Z caller=http.go:194 level=debug traceID=0b463e9c680958f5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.27735ms" +ts=2024-05-02T12:17:23.386220644Z caller=http.go:194 level=debug traceID=62c1068a1ebdeac0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.695734ms" +ts=2024-05-02T12:17:23.386125947Z caller=http.go:194 level=debug traceID=1b264ae24b146f90 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.389816ms" +ts=2024-05-02T12:17:23.385759946Z caller=http.go:194 level=debug traceID=1126ef06d8ca4041 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.864302ms" +ts=2024-05-02T12:17:23.385641403Z caller=http.go:194 level=debug traceID=575b4d6965874394 orgID=1218 msg="POST /push.v1.PusherService/Push (400) 199.45µs" +ts=2024-05-02T12:17:23.385521162Z caller=http.go:194 level=debug traceID=62f97feda7b17985 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.64595ms" +ts=2024-05-02T12:17:23.385051487Z caller=http.go:194 level=debug traceID=0d3021eb806b022a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.4661ms" +ts=2024-05-02T12:17:23.385078301Z caller=http.go:194 level=debug traceID=67d9609d6036aa01 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.653783ms" +ts=2024-05-02T12:17:23.383070752Z caller=http.go:194 level=debug traceID=34d7abc25123d01e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.414964ms" +ts=2024-05-02T12:17:23.382537603Z caller=http.go:194 level=debug traceID=3e7ff036e7d6bdfb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.577274ms" +ts=2024-05-02T12:17:23.381786678Z caller=http.go:194 level=debug traceID=420d3af4edc2a61a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.572975ms" +ts=2024-05-02T12:17:23.38017914Z caller=http.go:194 level=debug traceID=45f9c0069dcc8281 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.039388ms" +ts=2024-05-02T12:17:23.378917503Z caller=http.go:194 level=debug traceID=409fc5be1d6713a2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.685823ms" +ts=2024-05-02T12:17:23.377837636Z caller=http.go:194 level=debug traceID=7fa7244a0600f2d3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.981846ms" +ts=2024-05-02T12:17:23.377492054Z caller=http.go:194 level=debug traceID=01ab56640b58a5a1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.417973ms" +ts=2024-05-02T12:17:23.377307323Z caller=http.go:194 level=debug traceID=5d4f0b6dae8b2623 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.078937ms" +ts=2024-05-02T12:17:23.377210591Z caller=http.go:194 level=debug traceID=186139591cea7838 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.997578ms" +ts=2024-05-02T12:17:23.377114993Z caller=http.go:194 level=debug traceID=7378abf6976127f2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.986729ms" +ts=2024-05-02T12:17:23.37596629Z caller=http.go:194 level=debug traceID=62f97feda7b17985 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.319958ms" +ts=2024-05-02T12:17:23.374906652Z caller=http.go:194 level=debug traceID=6122275589515357 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.543403ms" +ts=2024-05-02T12:17:23.374625652Z caller=http.go:194 level=debug traceID=0b463e9c680958f5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.003133ms" +ts=2024-05-02T12:17:23.374650141Z caller=http.go:194 level=debug traceID=62c1068a1ebdeac0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.319228ms" +ts=2024-05-02T12:17:23.373996133Z 
caller=http.go:194 level=debug traceID=1126ef06d8ca4041 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.74153ms" +ts=2024-05-02T12:17:23.370094149Z caller=http.go:194 level=debug traceID=233b67383efd3311 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.034037ms" +ts=2024-05-02T12:17:23.368587294Z caller=http.go:194 level=debug traceID=6e74b9c2f1c3cd2f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.750782ms" +ts=2024-05-02T12:17:23.368428098Z caller=http.go:194 level=debug traceID=3e7ff036e7d6bdfb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.306362ms" +ts=2024-05-02T12:17:23.367120721Z caller=http.go:194 level=debug traceID=7fa7244a0600f2d3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.269304ms" +ts=2024-05-02T12:17:23.366829869Z caller=http.go:194 level=debug traceID=01ab56640b58a5a1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.774675ms" +ts=2024-05-02T12:17:23.365956941Z caller=http.go:194 level=debug traceID=6e2e5b5e934daff4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.602818ms" +ts=2024-05-02T12:17:23.365941332Z caller=http.go:194 level=debug traceID=7378abf6976127f2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.740362ms" +ts=2024-05-02T12:17:23.365890043Z caller=http.go:194 level=debug traceID=186139591cea7838 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.531888ms" +ts=2024-05-02T12:17:23.364799655Z caller=http.go:194 level=debug traceID=5d4f0b6dae8b2623 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.56042ms" +ts=2024-05-02T12:17:23.36452233Z caller=http.go:194 level=debug traceID=5d669aa222410460 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.667323ms" +ts=2024-05-02T12:17:23.363897079Z caller=http.go:194 level=debug traceID=4d2710081e80a605 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.730428ms" +ts=2024-05-02T12:17:23.36193572Z caller=http.go:194 level=debug traceID=757cb2390fd16d38 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.022831ms" +ts=2024-05-02T12:17:23.361262205Z caller=http.go:194 level=debug traceID=7126ea144c3a488d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.32349ms" +ts=2024-05-02T12:17:23.360911728Z caller=http.go:194 level=debug traceID=31e1e7273fc01492 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.190837ms" +ts=2024-05-02T12:17:23.360368668Z caller=http.go:194 level=debug traceID=7164e85de37bee78 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.890331ms" +ts=2024-05-02T12:17:23.359961122Z caller=http.go:194 level=debug traceID=4cbdaff160070b50 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.410965ms" +ts=2024-05-02T12:17:23.359992909Z caller=http.go:194 level=debug traceID=0c80af3a59bac47f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 697.835µs" +ts=2024-05-02T12:17:23.359581395Z caller=http.go:194 level=debug traceID=233b67383efd3311 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.53958ms" +ts=2024-05-02T12:17:23.35895219Z caller=http.go:194 level=debug traceID=6e74b9c2f1c3cd2f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.009748ms" +ts=2024-05-02T12:17:23.358034307Z caller=http.go:194 level=debug traceID=3da3d39ed68595c7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.663168ms" +ts=2024-05-02T12:17:23.355916129Z caller=http.go:194 level=debug traceID=345cc7a1549c8886 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.989507ms" +ts=2024-05-02T12:17:23.355256374Z caller=http.go:194 level=debug traceID=6e2e5b5e934daff4 orgID=1218 msg="POST 
/push.v1.PusherService/Push (200) 3.14982ms" +ts=2024-05-02T12:17:23.353719368Z caller=http.go:194 level=debug traceID=5d669aa222410460 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.733964ms" +ts=2024-05-02T12:17:23.353611994Z caller=http.go:194 level=debug traceID=5ee21ab0701ea5a4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.971989ms" +ts=2024-05-02T12:17:23.352897329Z caller=http.go:194 level=debug traceID=2294c2825784475a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.434238ms" +ts=2024-05-02T12:17:23.352963129Z caller=http.go:194 level=debug traceID=4d2710081e80a605 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.262876ms" +ts=2024-05-02T12:17:23.352625028Z caller=http.go:194 level=debug traceID=1e9f983d1edccba3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.532764ms" +ts=2024-05-02T12:17:23.352554624Z caller=http.go:194 level=debug traceID=5d0d7c012e3b76c8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.635543ms" +ts=2024-05-02T12:17:23.351805727Z caller=http.go:194 level=debug traceID=52a4a646e5c063e4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.138604ms" +ts=2024-05-02T12:17:23.351423852Z caller=http.go:194 level=debug traceID=7126ea144c3a488d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.34637ms" +ts=2024-05-02T12:17:23.350289612Z caller=http.go:194 level=debug traceID=757cb2390fd16d38 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.170644ms" +ts=2024-05-02T12:17:23.349258934Z caller=http.go:194 level=debug traceID=7164e85de37bee78 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.946191ms" +ts=2024-05-02T12:17:23.348887762Z caller=http.go:194 level=debug traceID=31e1e7273fc01492 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.776291ms" +ts=2024-05-02T12:17:23.348677085Z caller=http.go:194 level=debug traceID=0c80af3a59bac47f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 387.072µs" +ts=2024-05-02T12:17:23.347504105Z caller=http.go:194 level=debug traceID=3da3d39ed68595c7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.853801ms" +ts=2024-05-02T12:17:23.347159318Z caller=http.go:194 level=debug traceID=4cbdaff160070b50 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.95164ms" +ts=2024-05-02T12:17:23.344800969Z caller=http.go:194 level=debug traceID=345cc7a1549c8886 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.991184ms" +ts=2024-05-02T12:17:23.344193396Z caller=http.go:194 level=debug traceID=68c39a6a38430ac2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.590435ms" +ts=2024-05-02T12:17:23.344094271Z caller=http.go:194 level=debug traceID=54ad8b289fdd67b4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.972053ms" +ts=2024-05-02T12:17:23.343133005Z caller=http.go:194 level=debug traceID=5d0d7c012e3b76c8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.148434ms" +ts=2024-05-02T12:17:23.342489699Z caller=http.go:194 level=debug traceID=1e9f983d1edccba3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.526087ms" +ts=2024-05-02T12:17:23.341656014Z caller=http.go:194 level=debug traceID=0345d8839f9b6e97 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.035539ms" +ts=2024-05-02T12:17:23.340633487Z caller=http.go:194 level=debug traceID=52a4a646e5c063e4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.364015ms" +ts=2024-05-02T12:17:23.340517131Z caller=http.go:194 level=debug traceID=5ee21ab0701ea5a4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.072412ms" +ts=2024-05-02T12:17:23.340453678Z 
caller=http.go:194 level=debug traceID=2294c2825784475a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.451531ms" +ts=2024-05-02T12:17:23.340274722Z caller=http.go:194 level=debug traceID=7c213cfbc2d51788 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.116556ms" +ts=2024-05-02T12:17:23.339335073Z caller=http.go:194 level=debug traceID=0e9372f5a563ccfb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.717556ms" +ts=2024-05-02T12:17:23.338181325Z caller=http.go:194 level=debug traceID=5ff2412e77da321f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.381527ms" +ts=2024-05-02T12:17:23.335756371Z caller=http.go:194 level=debug traceID=4ebc24b18afdb1e8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.873893ms" +ts=2024-05-02T12:17:23.335698866Z caller=http.go:194 level=debug traceID=7071a035b4544b9d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.382468ms" +ts=2024-05-02T12:17:23.333882697Z caller=http.go:194 level=debug traceID=54ad8b289fdd67b4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.169492ms" +ts=2024-05-02T12:17:23.33352635Z caller=http.go:194 level=debug traceID=424681f7c106a7bd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.420606ms" +ts=2024-05-02T12:17:23.332401634Z caller=http.go:194 level=debug traceID=68c39a6a38430ac2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.316491ms" +ts=2024-05-02T12:17:23.330630257Z caller=http.go:194 level=debug traceID=0345d8839f9b6e97 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.040466ms" +ts=2024-05-02T12:17:23.330084574Z caller=http.go:194 level=debug traceID=1539fd1b0360c7a4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.636283ms" +ts=2024-05-02T12:17:23.329887488Z caller=http.go:194 level=debug traceID=56cc97f5fc8470d6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.542816ms" +ts=2024-05-02T12:17:23.329844295Z caller=http.go:194 level=debug traceID=0e9372f5a563ccfb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.006761ms" +ts=2024-05-02T12:17:23.329028484Z caller=http.go:194 level=debug traceID=7c213cfbc2d51788 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.219972ms" +ts=2024-05-02T12:17:23.328978182Z caller=http.go:194 level=debug traceID=6a5fef6695114cf4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.1928ms" +ts=2024-05-02T12:17:23.326958206Z caller=http.go:194 level=debug traceID=5ff2412e77da321f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.099476ms" +ts=2024-05-02T12:17:23.326358333Z caller=http.go:194 level=debug traceID=735c30fc7c76747f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.437229ms" +ts=2024-05-02T12:17:23.325988527Z caller=http.go:194 level=debug traceID=2f5752f5fbb1baea orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.841034ms" +ts=2024-05-02T12:17:23.325360951Z caller=http.go:194 level=debug traceID=7071a035b4544b9d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.306325ms" +ts=2024-05-02T12:17:23.324483864Z caller=http.go:194 level=debug traceID=66196836f6c3e8cf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.299535ms" +ts=2024-05-02T12:17:23.323685323Z caller=http.go:194 level=debug traceID=69f016620ae9da96 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.500532ms" +ts=2024-05-02T12:17:23.323447903Z caller=http.go:194 level=debug traceID=4ebc24b18afdb1e8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.630159ms" +ts=2024-05-02T12:17:23.32303027Z caller=http.go:194 level=debug traceID=542ccd42bc95542c orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 1.400465ms" +ts=2024-05-02T12:17:23.322648265Z caller=http.go:194 level=debug traceID=0945d07d7de97acf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.515508ms" +ts=2024-05-02T12:17:23.321657822Z caller=http.go:194 level=debug traceID=3714aba49ce6fafa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 46.819892ms" +ts=2024-05-02T12:17:23.321280133Z caller=http.go:194 level=debug traceID=424681f7c106a7bd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.614582ms" +ts=2024-05-02T12:17:23.319309749Z caller=http.go:194 level=debug traceID=6a5fef6695114cf4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.527932ms" +ts=2024-05-02T12:17:23.318200395Z caller=http.go:194 level=debug traceID=56cc97f5fc8470d6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.037036ms" +ts=2024-05-02T12:17:23.317112972Z caller=http.go:194 level=debug traceID=1539fd1b0360c7a4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.046794ms" +ts=2024-05-02T12:17:23.315893247Z caller=http.go:194 level=debug traceID=66196836f6c3e8cf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.78446ms" +ts=2024-05-02T12:17:23.315046793Z caller=http.go:194 level=debug traceID=735c30fc7c76747f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.76376ms" +ts=2024-05-02T12:17:23.313754024Z caller=http.go:194 level=debug traceID=3ea0d0c7d775ab58 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.093519ms" +ts=2024-05-02T12:17:23.313436978Z caller=http.go:194 level=debug traceID=2f5752f5fbb1baea orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.454832ms" +ts=2024-05-02T12:17:23.313183322Z caller=http.go:194 level=debug traceID=69f016620ae9da96 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.081559ms" +ts=2024-05-02T12:17:23.31269393Z caller=http.go:194 level=debug traceID=0945d07d7de97acf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.746039ms" +ts=2024-05-02T12:17:23.312199527Z caller=http.go:194 level=debug traceID=67c08c234a1587c9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.312629ms" +ts=2024-05-02T12:17:23.31179116Z caller=http.go:194 level=debug traceID=542ccd42bc95542c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.752712ms" +ts=2024-05-02T12:17:23.310395626Z caller=http.go:194 level=debug traceID=048cc37cc7efc077 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 45.728665ms" +ts=2024-05-02T12:17:23.309469978Z caller=http.go:194 level=debug traceID=568248f3dd8dc186 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.093892ms" +ts=2024-05-02T12:17:23.309276353Z caller=http.go:194 level=debug traceID=2293f9c8ebdfa1c3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.836366ms" +ts=2024-05-02T12:17:23.308242418Z caller=http.go:194 level=debug traceID=204b917d20ca2be1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.705516ms" +ts=2024-05-02T12:17:23.307185015Z caller=http.go:194 level=debug traceID=1183ef8339d30ad7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.76919ms" +ts=2024-05-02T12:17:23.306275384Z caller=http.go:194 level=debug traceID=043eac8f18c75d37 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.793978ms" +ts=2024-05-02T12:17:23.306052173Z caller=http.go:194 level=debug traceID=52413fee38214e88 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.678694ms" +ts=2024-05-02T12:17:23.302600086Z caller=http.go:194 level=debug traceID=568248f3dd8dc186 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.595194ms" +ts=2024-05-02T12:17:23.30227694Z 
caller=http.go:194 level=debug traceID=05a0ea621c8abd4d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.121165ms" +ts=2024-05-02T12:17:23.30214336Z caller=http.go:194 level=debug traceID=3ea0d0c7d775ab58 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.819344ms" +ts=2024-05-02T12:17:23.302092383Z caller=http.go:194 level=debug traceID=72d5344cc7c2f5eb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.824104ms" +ts=2024-05-02T12:17:23.301457074Z caller=http.go:194 level=debug traceID=67c08c234a1587c9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.85551ms" +ts=2024-05-02T12:17:23.298942001Z caller=http.go:194 level=debug traceID=0800006395ebd62b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 354.6µs" +ts=2024-05-02T12:17:23.297842513Z caller=http.go:194 level=debug traceID=204b917d20ca2be1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.189493ms" +ts=2024-05-02T12:17:23.297170684Z caller=http.go:194 level=debug traceID=2293f9c8ebdfa1c3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.748201ms" +ts=2024-05-02T12:17:23.296117349Z caller=http.go:194 level=debug traceID=6b27ff30ebb6a9cb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.04158ms" +ts=2024-05-02T12:17:23.296179835Z caller=http.go:194 level=debug traceID=4a213613062edd87 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.699004ms" +ts=2024-05-02T12:17:23.295559336Z caller=http.go:194 level=debug traceID=043eac8f18c75d37 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.11286ms" +ts=2024-05-02T12:17:23.295043296Z caller=http.go:194 level=debug traceID=1183ef8339d30ad7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 16.428651ms" +ts=2024-05-02T12:17:23.294542992Z caller=http.go:194 level=debug traceID=52413fee38214e88 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.44521ms" +ts=2024-05-02T12:17:23.293713237Z caller=http.go:194 level=debug traceID=05a0ea621c8abd4d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 18.355521ms" +ts=2024-05-02T12:17:23.292485915Z caller=http.go:194 level=debug traceID=22a5a86233cf480b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 384.203µs" +ts=2024-05-02T12:17:23.292254841Z caller=http.go:194 level=debug traceID=72d5344cc7c2f5eb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.972412ms" +ts=2024-05-02T12:17:23.291553123Z caller=http.go:194 level=debug traceID=532db18e63395c1d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.66767ms" +ts=2024-05-02T12:17:23.290758538Z caller=http.go:194 level=debug traceID=3f636ce933e76cc3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.311744ms" +ts=2024-05-02T12:17:23.290277405Z caller=http.go:194 level=debug traceID=1dabb1fdee27cbdf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.687066ms" +ts=2024-05-02T12:17:23.287445486Z caller=http.go:194 level=debug traceID=0800006395ebd62b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 364.796µs" +ts=2024-05-02T12:17:23.286793216Z caller=http.go:194 level=debug traceID=604dcde9965a3e37 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 57.432872ms" +ts=2024-05-02T12:17:23.285348203Z caller=http.go:194 level=debug traceID=6b27ff30ebb6a9cb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.895884ms" +ts=2024-05-02T12:17:23.285239192Z caller=http.go:194 level=debug traceID=2b4410766405c740 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.914338ms" +ts=2024-05-02T12:17:23.285053089Z caller=http.go:194 level=debug traceID=4a213613062edd87 orgID=1218 msg="POST 
/push.v1.PusherService/Push (200) 1.603697ms" +ts=2024-05-02T12:17:23.284087472Z caller=http.go:194 level=debug traceID=3714aba49ce6fafa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 20.095176ms" +ts=2024-05-02T12:17:23.282545381Z caller=http.go:194 level=debug traceID=236a2d6c0bcb4b4f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.531737ms" +ts=2024-05-02T12:17:23.282018522Z caller=http.go:194 level=debug traceID=1a2409afb7fd14db orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.894824ms" +ts=2024-05-02T12:17:23.2813548Z caller=http.go:194 level=debug traceID=7f31472b858b52d8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.340157ms" +ts=2024-05-02T12:17:23.281215506Z caller=http.go:194 level=debug traceID=3f636ce933e76cc3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.841599ms" +ts=2024-05-02T12:17:23.281063878Z caller=http.go:194 level=debug traceID=22a5a86233cf480b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 413.376µs" +ts=2024-05-02T12:17:23.281015612Z caller=http.go:194 level=debug traceID=532db18e63395c1d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.475475ms" +ts=2024-05-02T12:17:23.279777225Z caller=http.go:194 level=debug traceID=0664c3894e0c59f3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 21.068713ms" +ts=2024-05-02T12:17:23.277145428Z caller=http.go:194 level=debug traceID=1dabb1fdee27cbdf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.571308ms" +ts=2024-05-02T12:17:23.276800464Z caller=http.go:194 level=debug traceID=3ea4234ab2d4b865 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.633547ms" +ts=2024-05-02T12:17:23.276756831Z caller=http.go:194 level=debug traceID=64a494c7857a6a8c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.970847ms" +ts=2024-05-02T12:17:23.276716415Z caller=http.go:194 level=debug traceID=762648a70653b642 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.535535ms" +ts=2024-05-02T12:17:23.275844876Z caller=http.go:194 level=debug traceID=22994e5751eb41d2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.168457ms" +ts=2024-05-02T12:17:23.274848383Z caller=http.go:194 level=debug traceID=1315ca59bbc06e0d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.562727ms" +ts=2024-05-02T12:17:23.274754619Z caller=http.go:194 level=debug traceID=2b4410766405c740 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.616466ms" +ts=2024-05-02T12:17:23.270511794Z caller=http.go:194 level=debug traceID=60ab00f707c5f9e0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.25393ms" +ts=2024-05-02T12:17:23.270372041Z caller=http.go:194 level=debug traceID=1a2409afb7fd14db orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.882321ms" +ts=2024-05-02T12:17:23.270270214Z caller=http.go:194 level=debug traceID=236a2d6c0bcb4b4f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.021044ms" +ts=2024-05-02T12:17:23.269901685Z caller=http.go:194 level=debug traceID=54a124def912eef0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.935195ms" +ts=2024-05-02T12:17:23.269777501Z caller=http.go:194 level=debug traceID=048cc37cc7efc077 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 18.966458ms" +ts=2024-05-02T12:17:23.269098919Z caller=http.go:194 level=debug traceID=7f31472b858b52d8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.173823ms" +ts=2024-05-02T12:17:23.268118864Z caller=http.go:194 level=debug traceID=367cad8deaa0da61 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.247432ms" 
+ts=2024-05-02T12:17:23.268191993Z caller=http.go:194 level=debug traceID=66ab3f09b6426db4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.751155ms" +ts=2024-05-02T12:17:23.268038169Z caller=http.go:194 level=debug traceID=5d6974bf99b34fa1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.165402ms" +ts=2024-05-02T12:17:23.266704153Z caller=http.go:194 level=debug traceID=64a494c7857a6a8c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.916434ms" +ts=2024-05-02T12:17:23.266754961Z caller=http.go:194 level=debug traceID=0b21b0227c3f74ab orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.795136ms" +ts=2024-05-02T12:17:23.266596411Z caller=http.go:194 level=debug traceID=1315ca59bbc06e0d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.760175ms" +ts=2024-05-02T12:17:23.265778621Z caller=http.go:194 level=debug traceID=3ea4234ab2d4b865 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.707047ms" +ts=2024-05-02T12:17:23.265434286Z caller=http.go:194 level=debug traceID=762648a70653b642 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.347089ms" +ts=2024-05-02T12:17:23.264859473Z caller=http.go:194 level=debug traceID=22994e5751eb41d2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.452642ms" +ts=2024-05-02T12:17:23.264637336Z caller=http.go:194 level=debug traceID=0664c3894e0c59f3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 18.845519ms" +ts=2024-05-02T12:17:23.264330684Z caller=http.go:194 level=debug traceID=60ab00f707c5f9e0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.227601ms" +ts=2024-05-02T12:17:23.261669038Z caller=http.go:194 level=debug traceID=4f9459e203f7c533 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.73615ms" +ts=2024-05-02T12:17:23.2594312Z caller=http.go:194 level=debug traceID=3824e1a7ab1f53d6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.205285ms" +ts=2024-05-02T12:17:23.259086043Z caller=http.go:194 level=debug traceID=555500f350bd4813 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.894021ms" +ts=2024-05-02T12:17:23.259037772Z caller=http.go:194 level=debug traceID=54a124def912eef0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.253978ms" +ts=2024-05-02T12:17:23.257989012Z caller=http.go:194 level=debug traceID=66ab3f09b6426db4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.346436ms" +ts=2024-05-02T12:17:23.257458039Z caller=http.go:194 level=debug traceID=0b21b0227c3f74ab orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.358542ms" +ts=2024-05-02T12:17:23.256048558Z caller=http.go:194 level=debug traceID=367cad8deaa0da61 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.385066ms" +ts=2024-05-02T12:17:23.25577254Z caller=http.go:194 level=debug traceID=5d6974bf99b34fa1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.593497ms" +ts=2024-05-02T12:17:23.25508869Z caller=http.go:194 level=debug traceID=21e04a0c633a5d66 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.810039ms" +ts=2024-05-02T12:17:23.254588714Z caller=http.go:194 level=debug traceID=6f2a4095c247aecf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.890966ms" +ts=2024-05-02T12:17:23.254278619Z caller=http.go:194 level=debug traceID=7e4193bebcfe8eae orgID=3648 msg="POST /push.v1.PusherService/Push (200) 427.481µs" +ts=2024-05-02T12:17:23.253993702Z caller=http.go:194 level=debug traceID=70b86b55ba1a69af orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.450311ms" +ts=2024-05-02T12:17:23.25353164Z caller=http.go:194 level=debug 
traceID=3fb7c47bae866dd0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.537144ms" +ts=2024-05-02T12:17:23.253381339Z caller=http.go:194 level=debug traceID=000144ec235b78fc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 295.045µs" +ts=2024-05-02T12:17:23.25315849Z caller=http.go:194 level=debug traceID=6843315771807bab orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.429523ms" +ts=2024-05-02T12:17:23.251290918Z caller=http.go:194 level=debug traceID=4f9459e203f7c533 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.161484ms" +ts=2024-05-02T12:17:23.249770142Z caller=http.go:194 level=debug traceID=3824e1a7ab1f53d6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.540746ms" +ts=2024-05-02T12:17:23.249066589Z caller=http.go:194 level=debug traceID=604dcde9965a3e37 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 33.93884ms" +ts=2024-05-02T12:17:23.247681813Z caller=http.go:194 level=debug traceID=555500f350bd4813 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.82218ms" +ts=2024-05-02T12:17:23.24360692Z caller=http.go:194 level=debug traceID=5ebc4becae4e972a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.357748ms" +ts=2024-05-02T12:17:23.243310034Z caller=http.go:194 level=debug traceID=21e04a0c633a5d66 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.594059ms" +ts=2024-05-02T12:17:23.243316557Z caller=http.go:194 level=debug traceID=70b86b55ba1a69af orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.906907ms" +ts=2024-05-02T12:17:23.243211362Z caller=http.go:194 level=debug traceID=6f2a4095c247aecf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.837218ms" +ts=2024-05-02T12:17:23.242940909Z caller=http.go:194 level=debug traceID=7e4193bebcfe8eae orgID=1218 msg="POST /push.v1.PusherService/Push (200) 370.098µs" +ts=2024-05-02T12:17:23.242160859Z caller=http.go:194 level=debug traceID=000144ec235b78fc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 405.735µs" +ts=2024-05-02T12:17:23.242090417Z caller=http.go:194 level=debug traceID=3fb7c47bae866dd0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.904634ms" +ts=2024-05-02T12:17:23.240523941Z caller=http.go:194 level=debug traceID=6843315771807bab orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.593132ms" +ts=2024-05-02T12:17:23.240041629Z caller=http.go:194 level=debug traceID=5a77b19a32da23cb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.603914ms" +ts=2024-05-02T12:17:23.238358608Z caller=http.go:194 level=debug traceID=383283c5d186774c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.88077ms" +ts=2024-05-02T12:17:23.236905313Z caller=http.go:194 level=debug traceID=4fbbeed58c88395b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 34.012778ms" +ts=2024-05-02T12:17:23.235354571Z caller=http.go:194 level=debug traceID=2617ff8f06a81179 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 286.497µs" +ts=2024-05-02T12:17:23.234749387Z caller=http.go:194 level=debug traceID=229d27dd62ad553a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.138708ms" +ts=2024-05-02T12:17:23.23400353Z caller=http.go:194 level=debug traceID=49bdde1a91a070a9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.990928ms" +ts=2024-05-02T12:17:23.232030633Z caller=http.go:194 level=debug traceID=5ebc4becae4e972a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.352613ms" +ts=2024-05-02T12:17:23.229092301Z caller=http.go:194 level=debug traceID=2bbc8fab7cdd9133 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
1.773993ms" +ts=2024-05-02T12:17:23.228586717Z caller=http.go:194 level=debug traceID=5a77b19a32da23cb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.274796ms" +ts=2024-05-02T12:17:23.228546246Z caller=http.go:194 level=debug traceID=1dae67dcb093a0b8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.609007ms" +ts=2024-05-02T12:17:23.228154516Z caller=http.go:194 level=debug traceID=4fbbeed58c88395b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.801758ms" +ts=2024-05-02T12:17:23.226781791Z caller=http.go:194 level=debug traceID=5219b341126ea07d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.834935ms" +ts=2024-05-02T12:17:23.225593755Z caller=http.go:194 level=debug traceID=383283c5d186774c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.989098ms" +ts=2024-05-02T12:17:23.225409502Z caller=http.go:194 level=debug traceID=473e1620a2488403 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.727424ms" +ts=2024-05-02T12:17:23.224292213Z caller=http.go:194 level=debug traceID=229d27dd62ad553a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.339823ms" +ts=2024-05-02T12:17:23.22428262Z caller=http.go:194 level=debug traceID=2617ff8f06a81179 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 458.611µs" +ts=2024-05-02T12:17:23.224001905Z caller=http.go:194 level=debug traceID=49bdde1a91a070a9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.128485ms" +ts=2024-05-02T12:17:23.223164774Z caller=http.go:194 level=debug traceID=3080de8e19b11bb9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.701792ms" +ts=2024-05-02T12:17:23.221738892Z caller=http.go:194 level=debug traceID=188ecbd099014151 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 15.655568ms" +ts=2024-05-02T12:17:23.219315746Z caller=http.go:194 level=debug traceID=342fa310f85e2fe8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.015764ms" +ts=2024-05-02T12:17:23.217478548Z caller=http.go:194 level=debug traceID=1dae67dcb093a0b8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.686723ms" +ts=2024-05-02T12:17:23.217237731Z caller=http.go:194 level=debug traceID=2bbc8fab7cdd9133 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.618263ms" +ts=2024-05-02T12:17:23.216694934Z caller=http.go:194 level=debug traceID=342fa310f85e2fe8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.894775ms" +ts=2024-05-02T12:17:23.216565104Z caller=http.go:194 level=debug traceID=24402c00f28025da orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.087661ms" +ts=2024-05-02T12:17:23.215528153Z caller=http.go:194 level=debug traceID=473e1620a2488403 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.069421ms" +ts=2024-05-02T12:17:23.215403307Z caller=http.go:194 level=debug traceID=5219b341126ea07d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.977233ms" +ts=2024-05-02T12:17:23.213491084Z caller=http.go:194 level=debug traceID=41180fc05e65d7c9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.52749ms" +ts=2024-05-02T12:17:23.210047917Z caller=http.go:194 level=debug traceID=3080de8e19b11bb9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.586481ms" +ts=2024-05-02T12:17:23.207034726Z caller=http.go:194 level=debug traceID=188ecbd099014151 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.084614ms" +ts=2024-05-02T12:17:23.205201005Z caller=http.go:194 level=debug traceID=315806f9fc5f7660 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.411071ms" +ts=2024-05-02T12:17:23.205061253Z caller=http.go:194 level=debug 
traceID=24402c00f28025da orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.37713ms" +ts=2024-05-02T12:17:23.205089273Z caller=http.go:194 level=debug traceID=41180fc05e65d7c9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 16.009478ms" +ts=2024-05-02T12:17:23.2019616Z caller=http.go:194 level=debug traceID=00c62e945a9e8571 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.77775ms" +ts=2024-05-02T12:17:23.199740101Z caller=http.go:194 level=debug traceID=7f2f61152e0496b8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.123993ms" +ts=2024-05-02T12:17:23.19958767Z caller=http.go:194 level=debug traceID=6f8392b325e8011e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.583191ms" +ts=2024-05-02T12:17:23.198648222Z caller=http.go:194 level=debug traceID=300713f29e2bd4b3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.971404ms" +ts=2024-05-02T12:17:23.198199284Z caller=http.go:194 level=debug traceID=7f314b5c85cd6b82 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.288754ms" +ts=2024-05-02T12:17:23.196278629Z caller=http.go:194 level=debug traceID=00fe2fdcebd53b6a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.50574ms" +ts=2024-05-02T12:17:23.195301435Z caller=http.go:194 level=debug traceID=37f37d6be6c8bf48 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.173152ms" +ts=2024-05-02T12:17:23.195211768Z caller=http.go:194 level=debug traceID=315806f9fc5f7660 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.798115ms" +ts=2024-05-02T12:17:23.195237237Z caller=http.go:194 level=debug traceID=226e75584bed8fff orgID=3648 msg="POST /push.v1.PusherService/Push (200) 290.566µs" +ts=2024-05-02T12:17:23.194704009Z caller=http.go:194 level=debug traceID=1687c988400f26a7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.718743ms" +ts=2024-05-02T12:17:23.193466967Z caller=http.go:194 level=debug traceID=464663a1b8f84e8a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.856355ms" +ts=2024-05-02T12:17:23.193379326Z caller=http.go:194 level=debug traceID=067c9c1c2ac8caa0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 277.992µs" +ts=2024-05-02T12:17:23.191297373Z caller=http.go:194 level=debug traceID=00c62e945a9e8571 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.189128ms" +ts=2024-05-02T12:17:23.191224524Z caller=http.go:194 level=debug traceID=40d17a26a36d3358 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.850703ms" +ts=2024-05-02T12:17:23.190685403Z caller=http.go:194 level=debug traceID=6f8392b325e8011e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.921703ms" +ts=2024-05-02T12:17:23.189667914Z caller=http.go:194 level=debug traceID=566e7d945fdc1104 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.751222ms" +ts=2024-05-02T12:17:23.1881743Z caller=http.go:194 level=debug traceID=7f2f61152e0496b8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.745759ms" +ts=2024-05-02T12:17:23.187534956Z caller=http.go:194 level=debug traceID=7f314b5c85cd6b82 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.913158ms" +ts=2024-05-02T12:17:23.185503793Z caller=http.go:194 level=debug traceID=00fe2fdcebd53b6a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.992482ms" +ts=2024-05-02T12:17:23.185065843Z caller=http.go:194 level=debug traceID=37f37d6be6c8bf48 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.156465ms" +ts=2024-05-02T12:17:23.184777583Z caller=http.go:194 level=debug traceID=300713f29e2bd4b3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
3.116033ms" +ts=2024-05-02T12:17:23.184782862Z caller=http.go:194 level=debug traceID=40d17a26a36d3358 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.991501ms" +ts=2024-05-02T12:17:23.184754894Z caller=http.go:194 level=debug traceID=226e75584bed8fff orgID=1218 msg="POST /push.v1.PusherService/Push (200) 333.434µs" +ts=2024-05-02T12:17:23.184423425Z caller=http.go:194 level=debug traceID=1687c988400f26a7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.790768ms" +ts=2024-05-02T12:17:23.182479678Z caller=http.go:194 level=debug traceID=587cae9724ae0826 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.438513ms" +ts=2024-05-02T12:17:23.182508339Z caller=http.go:194 level=debug traceID=464663a1b8f84e8a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.98887ms" +ts=2024-05-02T12:17:23.181943566Z caller=http.go:194 level=debug traceID=067c9c1c2ac8caa0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 361.891µs" +ts=2024-05-02T12:17:23.179574914Z caller=http.go:194 level=debug traceID=566e7d945fdc1104 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.846318ms" +ts=2024-05-02T12:17:23.17905832Z caller=http.go:194 level=debug traceID=325a1c54cc20fb20 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.692268ms" +ts=2024-05-02T12:17:23.177140838Z caller=http.go:194 level=debug traceID=724da0ad154b03b0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.516254ms" +ts=2024-05-02T12:17:23.17602749Z caller=http.go:194 level=debug traceID=6f2376c854b01217 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.684908ms" +ts=2024-05-02T12:17:23.17513653Z caller=http.go:194 level=debug traceID=5f59df3792dc8f9c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.411498ms" +ts=2024-05-02T12:17:23.174643414Z caller=http.go:194 level=debug traceID=6562036a2e532280 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.308821ms" +ts=2024-05-02T12:17:23.171953949Z caller=http.go:194 level=debug traceID=587cae9724ae0826 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.094971ms" +ts=2024-05-02T12:17:23.171267735Z caller=http.go:194 level=debug traceID=351cb108c232ee5e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.105664ms" +ts=2024-05-02T12:17:23.168898716Z caller=http.go:194 level=debug traceID=6fa600fc057a53a9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.934137ms" +ts=2024-05-02T12:17:23.167564839Z caller=http.go:194 level=debug traceID=6f2376c854b01217 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.194415ms" +ts=2024-05-02T12:17:23.167325774Z caller=http.go:194 level=debug traceID=325a1c54cc20fb20 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.668716ms" +ts=2024-05-02T12:17:23.166706808Z caller=http.go:194 level=debug traceID=13d1cf303f6f3d97 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.166714ms" +ts=2024-05-02T12:17:23.16641318Z caller=http.go:194 level=debug traceID=62e741f85a898744 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.12093ms" +ts=2024-05-02T12:17:23.165277712Z caller=http.go:194 level=debug traceID=724da0ad154b03b0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.042932ms" +ts=2024-05-02T12:17:23.164923308Z caller=http.go:194 level=debug traceID=440bb2419c03cadd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.33133ms" +ts=2024-05-02T12:17:23.163323787Z caller=http.go:194 level=debug traceID=6562036a2e532280 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.243325ms" +ts=2024-05-02T12:17:23.163015446Z caller=http.go:194 level=debug 
traceID=5f59df3792dc8f9c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.018603ms" +ts=2024-05-02T12:17:23.160091877Z caller=http.go:194 level=debug traceID=351cb108c232ee5e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.772007ms" +ts=2024-05-02T12:17:23.157661262Z caller=http.go:194 level=debug traceID=6fa600fc057a53a9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.449647ms" +ts=2024-05-02T12:17:23.155634915Z caller=http.go:194 level=debug traceID=13d1cf303f6f3d97 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.723309ms" +ts=2024-05-02T12:17:23.155174755Z caller=http.go:194 level=debug traceID=2e977b045a0aee30 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.135422ms" +ts=2024-05-02T12:17:23.154209287Z caller=http.go:194 level=debug traceID=62e741f85a898744 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.485279ms" +ts=2024-05-02T12:17:23.153334987Z caller=http.go:194 level=debug traceID=440bb2419c03cadd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.449372ms" +ts=2024-05-02T12:17:23.152124385Z caller=http.go:194 level=debug traceID=779a92af5c6dbca8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 427.631µs" +ts=2024-05-02T12:17:23.147531946Z caller=http.go:194 level=debug traceID=305ede08166589b2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.37651ms" +ts=2024-05-02T12:17:23.146495969Z caller=http.go:194 level=debug traceID=0afc144bd50bc837 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.840173ms" +ts=2024-05-02T12:17:23.14572449Z caller=http.go:194 level=debug traceID=0c478d9e3575457c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.982212ms" +ts=2024-05-02T12:17:23.144251366Z caller=http.go:194 level=debug traceID=2e977b045a0aee30 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.312126ms" +ts=2024-05-02T12:17:23.143059808Z caller=http.go:194 level=debug traceID=4eebe7abd449d009 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.632625ms" +ts=2024-05-02T12:17:23.141956999Z caller=http.go:194 level=debug traceID=7ae79a63c47f1430 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.270944ms" +ts=2024-05-02T12:17:23.141719827Z caller=http.go:194 level=debug traceID=4013842950df51a3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.626634ms" +ts=2024-05-02T12:17:23.140553904Z caller=http.go:194 level=debug traceID=779a92af5c6dbca8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 421.035µs" +ts=2024-05-02T12:17:23.139555626Z caller=http.go:194 level=debug traceID=00c3ab8f01a33ed9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 242.783µs" +ts=2024-05-02T12:17:23.138715563Z caller=http.go:194 level=debug traceID=7985548f479ce553 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 213.624µs" +ts=2024-05-02T12:17:23.138057185Z caller=http.go:194 level=debug traceID=572e9897e1db2a29 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.555122ms" +ts=2024-05-02T12:17:23.137481946Z caller=http.go:194 level=debug traceID=305ede08166589b2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.572743ms" +ts=2024-05-02T12:17:23.137251475Z caller=http.go:194 level=debug traceID=376690c20e2398f8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.842796ms" +ts=2024-05-02T12:17:23.136887361Z caller=http.go:194 level=debug traceID=6a8f4d0ee4ddc145 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.101011ms" +ts=2024-05-02T12:17:23.136224889Z caller=http.go:194 level=debug traceID=4ce7b3613f8aa8b7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
3.295269ms" +ts=2024-05-02T12:17:23.134947993Z caller=http.go:194 level=debug traceID=0afc144bd50bc837 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.70496ms" +ts=2024-05-02T12:17:23.134813239Z caller=http.go:194 level=debug traceID=59ccedc91e90a8ae orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.156307ms" +ts=2024-05-02T12:17:23.1341343Z caller=http.go:194 level=debug traceID=0c478d9e3575457c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.034306ms" +ts=2024-05-02T12:17:23.131791532Z caller=http.go:194 level=debug traceID=4eebe7abd449d009 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.042557ms" +ts=2024-05-02T12:17:23.131738689Z caller=http.go:194 level=debug traceID=339cc53f8c2bc180 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.334893ms" +ts=2024-05-02T12:17:23.123374885Z caller=http.go:194 level=debug traceID=4ce7b3613f8aa8b7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.28877ms" +ts=2024-05-02T12:17:23.130341737Z caller=http.go:194 level=debug traceID=69b6589bd3564424 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.190359ms" +ts=2024-05-02T12:17:23.130192213Z caller=http.go:194 level=debug traceID=4013842950df51a3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.908149ms" +ts=2024-05-02T12:17:23.129966905Z caller=http.go:194 level=debug traceID=7ae79a63c47f1430 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.795286ms" +ts=2024-05-02T12:17:23.128893401Z caller=http.go:194 level=debug traceID=1bc40bca82973c33 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.660816ms" +ts=2024-05-02T12:17:23.128775474Z caller=http.go:194 level=debug traceID=572e9897e1db2a29 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.295389ms" +ts=2024-05-02T12:17:23.128621366Z caller=http.go:194 level=debug traceID=00c3ab8f01a33ed9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 355.04µs" +ts=2024-05-02T12:17:23.128594371Z caller=http.go:194 level=debug traceID=6171339957424950 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.411865ms" +ts=2024-05-02T12:17:23.127723491Z caller=http.go:194 level=debug traceID=29e74f4c2e91e54b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.67636ms" +ts=2024-05-02T12:17:23.127515317Z caller=http.go:194 level=debug traceID=7985548f479ce553 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 360.883µs" +ts=2024-05-02T12:17:23.127079245Z caller=http.go:194 level=debug traceID=376690c20e2398f8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.803408ms" +ts=2024-05-02T12:17:23.125052164Z caller=http.go:194 level=debug traceID=59ccedc91e90a8ae orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.3052ms" +ts=2024-05-02T12:17:23.123746528Z caller=http.go:194 level=debug traceID=6a8f4d0ee4ddc145 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.543572ms" +ts=2024-05-02T12:17:23.122457481Z caller=http.go:194 level=debug traceID=4271e863d927fe70 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.246964ms" +ts=2024-05-02T12:17:23.122121499Z caller=http.go:194 level=debug traceID=32eee51b7befb205 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.53001ms" +ts=2024-05-02T12:17:23.122128696Z caller=http.go:194 level=debug traceID=339cc53f8c2bc180 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.366743ms" +ts=2024-05-02T12:17:23.122024725Z caller=http.go:194 level=debug traceID=60e43ab3c0c1adf1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.227637ms" +ts=2024-05-02T12:17:23.120156673Z caller=http.go:194 level=debug 
traceID=69b6589bd3564424 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.344986ms" +ts=2024-05-02T12:17:23.119105646Z caller=http.go:194 level=debug traceID=1bc40bca82973c33 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.167034ms" +ts=2024-05-02T12:17:23.116911666Z caller=http.go:194 level=debug traceID=1e3e6d4a3d9729b3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.784095ms" +ts=2024-05-02T12:17:23.116682305Z caller=http.go:194 level=debug traceID=6171339957424950 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.415311ms" +ts=2024-05-02T12:17:23.115334263Z caller=http.go:194 level=debug traceID=253122cdf8eff94c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.44302ms" +ts=2024-05-02T12:17:23.114732391Z caller=http.go:194 level=debug traceID=74276f7178bf98eb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.999616ms" +ts=2024-05-02T12:17:23.114091326Z caller=http.go:194 level=debug traceID=4271e863d927fe70 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.942307ms" +ts=2024-05-02T12:17:23.113341314Z caller=http.go:194 level=debug traceID=18c27a15cd9a790c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.034005ms" +ts=2024-05-02T12:17:23.112823201Z caller=http.go:194 level=debug traceID=12cfea2a31ea3f30 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.09311ms" +ts=2024-05-02T12:17:23.111959463Z caller=http.go:194 level=debug traceID=1783cfd7e4984dce orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.689301ms" +ts=2024-05-02T12:17:23.111880846Z caller=http.go:194 level=debug traceID=29e74f4c2e91e54b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.259579ms" +ts=2024-05-02T12:17:23.109217099Z caller=http.go:194 level=debug traceID=60e43ab3c0c1adf1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.065015ms" +ts=2024-05-02T12:17:23.109189561Z caller=http.go:194 level=debug traceID=32eee51b7befb205 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.648507ms" +ts=2024-05-02T12:17:23.107976742Z caller=http.go:194 level=debug traceID=0e8096cdb086a14b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.335793ms" +ts=2024-05-02T12:17:23.107573379Z caller=http.go:194 level=debug traceID=0af6e77b7a978cf5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.651292ms" +ts=2024-05-02T12:17:23.107109063Z caller=http.go:194 level=debug traceID=58eba93727da44f0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.174169ms" +ts=2024-05-02T12:17:23.105253655Z caller=http.go:194 level=debug traceID=1e3e6d4a3d9729b3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.503999ms" +ts=2024-05-02T12:17:23.104504908Z caller=http.go:194 level=debug traceID=74276f7178bf98eb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.838994ms" +ts=2024-05-02T12:17:23.103851056Z caller=http.go:194 level=debug traceID=253122cdf8eff94c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.601754ms" +ts=2024-05-02T12:17:23.102410376Z caller=http.go:194 level=debug traceID=1783cfd7e4984dce orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.447778ms" +ts=2024-05-02T12:17:23.100462116Z caller=http.go:194 level=debug traceID=12cfea2a31ea3f30 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 396.575µs" +ts=2024-05-02T12:17:23.100417001Z caller=http.go:194 level=debug traceID=0531e646e3f73ec5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.939705ms" +ts=2024-05-02T12:17:23.100206571Z caller=http.go:194 level=debug traceID=052a8cde41323cca orgID=3648 msg="POST /push.v1.PusherService/Push 
(200) 4.110021ms" +ts=2024-05-02T12:17:23.100098448Z caller=http.go:194 level=debug traceID=18c27a15cd9a790c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.670096ms" +ts=2024-05-02T12:17:23.097429237Z caller=http.go:194 level=debug traceID=47db7e081b27eae2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.091817ms" +ts=2024-05-02T12:17:23.097246304Z caller=http.go:194 level=debug traceID=0e8096cdb086a14b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.998738ms" +ts=2024-05-02T12:17:23.096293214Z caller=http.go:194 level=debug traceID=0af6e77b7a978cf5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.756014ms" +ts=2024-05-02T12:17:23.095604012Z caller=http.go:194 level=debug traceID=58eba93727da44f0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.995566ms" +ts=2024-05-02T12:17:23.095348082Z caller=http.go:194 level=debug traceID=3cc1fd6d2ea6ceed orgID=3648 msg="POST /push.v1.PusherService/Push (200) 18.549384ms" +ts=2024-05-02T12:17:23.095056858Z caller=http.go:194 level=debug traceID=47917e37c4028ab8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.589045ms" +ts=2024-05-02T12:17:23.093843196Z caller=http.go:194 level=debug traceID=686b00545d3d509e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.544008ms" +ts=2024-05-02T12:17:23.093561437Z caller=http.go:194 level=debug traceID=25f4144191d8c07f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 266.934µs" +ts=2024-05-02T12:17:23.091824858Z caller=http.go:194 level=debug traceID=19da975e71d63f22 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.955553ms" +ts=2024-05-02T12:17:23.090737596Z caller=http.go:194 level=debug traceID=0531e646e3f73ec5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.349532ms" +ts=2024-05-02T12:17:23.089721703Z caller=http.go:194 level=debug traceID=052a8cde41323cca orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.194039ms" +ts=2024-05-02T12:17:23.088710986Z caller=http.go:194 level=debug traceID=5023503837295d40 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 323.212µs" +ts=2024-05-02T12:17:23.088313345Z caller=http.go:194 level=debug traceID=5f573f3711a95a08 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.31402ms" +ts=2024-05-02T12:17:23.086741829Z caller=http.go:194 level=debug traceID=435f484d45e5bc11 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.081556ms" +ts=2024-05-02T12:17:23.086540896Z caller=http.go:194 level=debug traceID=47db7e081b27eae2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.558929ms" +ts=2024-05-02T12:17:23.086403299Z caller=http.go:194 level=debug traceID=5f821f3ebcc75187 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.593477ms" +ts=2024-05-02T12:17:23.085914409Z caller=http.go:194 level=debug traceID=4ce531b047d77cad orgID=3648 msg="POST /push.v1.PusherService/Push (200) 347.172µs" +ts=2024-05-02T12:17:23.083764244Z caller=http.go:194 level=debug traceID=25f4144191d8c07f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.435188ms" +ts=2024-05-02T12:17:23.083122183Z caller=http.go:194 level=debug traceID=686b00545d3d509e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.036454ms" +ts=2024-05-02T12:17:23.081370054Z caller=http.go:194 level=debug traceID=19da975e71d63f22 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.893972ms" +ts=2024-05-02T12:17:23.081401555Z caller=http.go:194 level=debug traceID=3cc1fd6d2ea6ceed orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.350308ms" +ts=2024-05-02T12:17:23.080535487Z caller=http.go:194 
level=debug traceID=66f930eb24bf47c5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.842265ms" +ts=2024-05-02T12:17:23.080532822Z caller=http.go:194 level=debug traceID=62c5a7cba1b7ef50 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.815625ms" +ts=2024-05-02T12:17:23.078594328Z caller=http.go:194 level=debug traceID=5023503837295d40 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 332.024µs" +ts=2024-05-02T12:17:23.078171174Z caller=http.go:194 level=debug traceID=47917e37c4028ab8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.288453ms" +ts=2024-05-02T12:17:23.077113901Z caller=http.go:194 level=debug traceID=5f573f3711a95a08 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.554129ms" +ts=2024-05-02T12:17:23.076987861Z caller=http.go:194 level=debug traceID=08aec7e11e8a2d09 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.356112ms" +ts=2024-05-02T12:17:23.07575257Z caller=http.go:194 level=debug traceID=4ce531b047d77cad orgID=1218 msg="POST /push.v1.PusherService/Push (200) 343.808µs" +ts=2024-05-02T12:17:23.075764596Z caller=http.go:194 level=debug traceID=5f821f3ebcc75187 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.317804ms" +ts=2024-05-02T12:17:23.075534422Z caller=http.go:194 level=debug traceID=435f484d45e5bc11 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.065164ms" +ts=2024-05-02T12:17:23.073648372Z caller=http.go:194 level=debug traceID=2db54337202dff28 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 356.65µs" +ts=2024-05-02T12:17:23.073440037Z caller=http.go:194 level=debug traceID=0cd44fe51dc5e9cb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.24041ms" +ts=2024-05-02T12:17:23.071318641Z caller=http.go:194 level=debug traceID=250d87846da6ee77 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.592298ms" +ts=2024-05-02T12:17:23.069125898Z caller=http.go:194 level=debug traceID=62c5a7cba1b7ef50 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.826042ms" +ts=2024-05-02T12:17:23.068013995Z caller=http.go:194 level=debug traceID=66f930eb24bf47c5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.926531ms" +ts=2024-05-02T12:17:23.06693289Z caller=http.go:194 level=debug traceID=08aec7e11e8a2d09 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.724256ms" +ts=2024-05-02T12:17:23.064115829Z caller=http.go:194 level=debug traceID=6f43dd234e6a04cf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.829592ms" +ts=2024-05-02T12:17:23.063269432Z caller=http.go:194 level=debug traceID=32ef5882097bafd2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.043887ms" +ts=2024-05-02T12:17:23.062097898Z caller=http.go:194 level=debug traceID=2db54337202dff28 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 328.755µs" +ts=2024-05-02T12:17:23.061953554Z caller=http.go:194 level=debug traceID=64c7724b138820ae orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.675311ms" +ts=2024-05-02T12:17:23.060405792Z caller=http.go:194 level=debug traceID=250d87846da6ee77 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.233783ms" +ts=2024-05-02T12:17:23.059553883Z caller=http.go:194 level=debug traceID=1fe33ab50ebef2e1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.786432ms" +ts=2024-05-02T12:17:23.058282676Z caller=http.go:194 level=debug traceID=33f18f6f8af5dac8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.27245ms" +ts=2024-05-02T12:17:23.058618114Z caller=http.go:194 level=debug traceID=5d64406e346f2431 orgID=3648 msg="POST /push.v1.PusherService/Push 
(200) 9.062173ms" +ts=2024-05-02T12:17:23.057795425Z caller=http.go:194 level=debug traceID=0cd44fe51dc5e9cb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 16.180958ms" +ts=2024-05-02T12:17:23.057606428Z caller=http.go:194 level=debug traceID=612f13f79e95ac5b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.427464ms" +ts=2024-05-02T12:17:23.053934793Z caller=http.go:194 level=debug traceID=6f43dd234e6a04cf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.870476ms" +ts=2024-05-02T12:17:23.052414722Z caller=http.go:194 level=debug traceID=32ef5882097bafd2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.336059ms" +ts=2024-05-02T12:17:23.052245085Z caller=http.go:194 level=debug traceID=64c7724b138820ae orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.298828ms" +ts=2024-05-02T12:17:23.049578892Z caller=http.go:194 level=debug traceID=1fe33ab50ebef2e1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.053363ms" +ts=2024-05-02T12:17:23.047944596Z caller=http.go:194 level=debug traceID=612f13f79e95ac5b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.792638ms" +ts=2024-05-02T12:17:23.047016664Z caller=http.go:194 level=debug traceID=4a951091ce4d7eef orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.850236ms" +ts=2024-05-02T12:17:23.046029287Z caller=http.go:194 level=debug traceID=5d64406e346f2431 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.579212ms" +ts=2024-05-02T12:17:23.044001119Z caller=http.go:194 level=debug traceID=33f18f6f8af5dac8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.098312ms" +ts=2024-05-02T12:17:23.042736998Z caller=http.go:194 level=debug traceID=3edc16de91c46997 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 344.164µs" +ts=2024-05-02T12:17:23.038220356Z caller=http.go:194 level=debug traceID=4e013aa4edbd460b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.887565ms" +ts=2024-05-02T12:17:23.03633488Z caller=http.go:194 level=debug traceID=4a951091ce4d7eef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.832254ms" +ts=2024-05-02T12:17:23.034155934Z caller=http.go:194 level=debug traceID=747b1bc2da0e9a91 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.310171ms" +ts=2024-05-02T12:17:23.033332137Z caller=http.go:194 level=debug traceID=4e02d737d63530c1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.277324ms" +ts=2024-05-02T12:17:23.032348723Z caller=http.go:194 level=debug traceID=4d2a26cc0d74640b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.167916ms" +ts=2024-05-02T12:17:23.031607996Z caller=http.go:194 level=debug traceID=3edc16de91c46997 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 501.724µs" +ts=2024-05-02T12:17:23.031162312Z caller=http.go:194 level=debug traceID=193e0d478e37d153 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.941092ms" +ts=2024-05-02T12:17:23.028091343Z caller=http.go:194 level=debug traceID=1cc46ae5e6e2b701 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.610615ms" +ts=2024-05-02T12:17:23.027882368Z caller=http.go:194 level=debug traceID=4e013aa4edbd460b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.828819ms" +ts=2024-05-02T12:17:23.027413582Z caller=http.go:194 level=debug traceID=63d249ee74ff470c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.989015ms" +ts=2024-05-02T12:17:23.024803041Z caller=http.go:194 level=debug traceID=1f3d1b8b5eda3764 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.953509ms" +ts=2024-05-02T12:17:23.023523826Z caller=http.go:194 
level=debug traceID=747b1bc2da0e9a91 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.773204ms" +ts=2024-05-02T12:17:23.022578334Z caller=http.go:194 level=debug traceID=4ea20491f27b0749 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 308.292µs" +ts=2024-05-02T12:17:23.021991402Z caller=http.go:194 level=debug traceID=49c3387acf6de515 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.282608ms" +ts=2024-05-02T12:17:23.021430598Z caller=http.go:194 level=debug traceID=4e02d737d63530c1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.08365ms" +ts=2024-05-02T12:17:23.021133938Z caller=http.go:194 level=debug traceID=193e0d478e37d153 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.053704ms" +ts=2024-05-02T12:17:23.020989602Z caller=http.go:194 level=debug traceID=7ce0d49bdb651f9a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.58534ms" +ts=2024-05-02T12:17:23.020849744Z caller=http.go:194 level=debug traceID=53c95face7002ecc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.459982ms" +ts=2024-05-02T12:17:23.020105975Z caller=http.go:194 level=debug traceID=4d2a26cc0d74640b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.197921ms" +ts=2024-05-02T12:17:23.019431734Z caller=http.go:194 level=debug traceID=2fbe4761db3cf1ef orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.653369ms" +ts=2024-05-02T12:17:23.017245541Z caller=http.go:194 level=debug traceID=1cc46ae5e6e2b701 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.916222ms" +ts=2024-05-02T12:17:23.015560193Z caller=http.go:194 level=debug traceID=0b2d3efec180a4eb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.687753ms" +ts=2024-05-02T12:17:23.014437741Z caller=http.go:194 level=debug traceID=7ce0d49bdb651f9a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 16.227023ms" +ts=2024-05-02T12:17:23.013930026Z caller=http.go:194 level=debug traceID=63d249ee74ff470c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.908211ms" +ts=2024-05-02T12:17:23.013403245Z caller=http.go:194 level=debug traceID=1f3d1b8b5eda3764 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.717484ms" +ts=2024-05-02T12:17:23.012871627Z caller=http.go:194 level=debug traceID=5ebf3707e6777863 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.713385ms" +ts=2024-05-02T12:17:23.012128561Z caller=http.go:194 level=debug traceID=5b13d94729944531 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.907618ms" +ts=2024-05-02T12:17:23.01189313Z caller=http.go:194 level=debug traceID=4ea20491f27b0749 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 435.313µs" +ts=2024-05-02T12:17:23.01156738Z caller=http.go:194 level=debug traceID=16a2b5413f2b06bc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.317637ms" +ts=2024-05-02T12:17:23.011435403Z caller=http.go:194 level=debug traceID=2fbe4761db3cf1ef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.66066ms" +ts=2024-05-02T12:17:23.010433616Z caller=http.go:194 level=debug traceID=5f6d6c1666b87254 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.770025ms" +ts=2024-05-02T12:17:23.009902743Z caller=http.go:194 level=debug traceID=53c95face7002ecc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.752107ms" +ts=2024-05-02T12:17:23.008245296Z caller=http.go:194 level=debug traceID=3188a0a386163750 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.002171ms" +ts=2024-05-02T12:17:23.006986202Z caller=http.go:194 level=debug traceID=72cfcced06e79908 orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 2.075798ms" +ts=2024-05-02T12:17:23.006739143Z caller=http.go:194 level=debug traceID=49c3387acf6de515 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.106853ms" +ts=2024-05-02T12:17:23.006320824Z caller=http.go:194 level=debug traceID=6597a52c6f00d314 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.643885ms" +ts=2024-05-02T12:17:23.004836949Z caller=http.go:194 level=debug traceID=0b2d3efec180a4eb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.862008ms" +ts=2024-05-02T12:17:23.004158334Z caller=http.go:194 level=debug traceID=2ddf37ae90ed86ba orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.341422ms" +ts=2024-05-02T12:17:23.003452962Z caller=http.go:194 level=debug traceID=7e179f585b750ed3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 260.82µs" +ts=2024-05-02T12:17:23.001263613Z caller=http.go:194 level=debug traceID=16a2b5413f2b06bc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.025505ms" +ts=2024-05-02T12:17:23.000611054Z caller=http.go:194 level=debug traceID=5b13d94729944531 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.507199ms" +ts=2024-05-02T12:17:23.000606106Z caller=http.go:194 level=debug traceID=5ebf3707e6777863 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.398694ms" +ts=2024-05-02T12:17:23.000224338Z caller=http.go:194 level=debug traceID=43af026a03e1ad56 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.784919ms" +ts=2024-05-02T12:17:22.99971055Z caller=http.go:194 level=debug traceID=75284c766099a92c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.036951ms" +ts=2024-05-02T12:17:22.997368835Z caller=http.go:194 level=debug traceID=5f6d6c1666b87254 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.029159ms" +ts=2024-05-02T12:17:22.997270047Z caller=http.go:194 level=debug traceID=3188a0a386163750 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.10298ms" +ts=2024-05-02T12:17:22.996161358Z caller=http.go:194 level=debug traceID=6597a52c6f00d314 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.76649ms" +ts=2024-05-02T12:17:22.995534132Z caller=http.go:194 level=debug traceID=5bfa2c6e5c4a0439 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.693181ms" +ts=2024-05-02T12:17:22.995251267Z caller=http.go:194 level=debug traceID=72cfcced06e79908 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.632632ms" +ts=2024-05-02T12:17:22.993468384Z caller=http.go:194 level=debug traceID=7e179f585b750ed3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 350.262µs" +ts=2024-05-02T12:17:22.990963667Z caller=http.go:194 level=debug traceID=2ddf37ae90ed86ba orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.651428ms" +ts=2024-05-02T12:17:22.988820912Z caller=http.go:194 level=debug traceID=43af026a03e1ad56 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.859644ms" +ts=2024-05-02T12:17:22.988650797Z caller=http.go:194 level=debug traceID=75284c766099a92c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.061146ms" +ts=2024-05-02T12:17:22.986443905Z caller=http.go:194 level=debug traceID=7343a1b04656958e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.850625ms" +ts=2024-05-02T12:17:22.985743158Z caller=http.go:194 level=debug traceID=3f5ad80c2418c490 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.146776ms" +ts=2024-05-02T12:17:22.985134969Z caller=http.go:194 level=debug traceID=54cac6fe516db9a0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.731145ms" +ts=2024-05-02T12:17:22.984807842Z 
caller=http.go:194 level=debug traceID=5bfa2c6e5c4a0439 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.702024ms" +ts=2024-05-02T12:17:22.983326824Z caller=http.go:194 level=debug traceID=487e667e4b498c7e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.168073ms" +ts=2024-05-02T12:17:22.982839402Z caller=http.go:194 level=debug traceID=26bb05bd853bbacf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.298063ms" +ts=2024-05-02T12:17:22.982214807Z caller=http.go:194 level=debug traceID=3caf799568e36fbf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 18.790565ms" +ts=2024-05-02T12:17:22.979365638Z caller=http.go:194 level=debug traceID=17b6f1068d1da043 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.329383ms" +ts=2024-05-02T12:17:22.975572894Z caller=http.go:194 level=debug traceID=7343a1b04656958e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.611567ms" +ts=2024-05-02T12:17:22.974731939Z caller=http.go:194 level=debug traceID=54cac6fe516db9a0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.122537ms" +ts=2024-05-02T12:17:22.974244808Z caller=http.go:194 level=debug traceID=3f5ad80c2418c490 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.864441ms" +ts=2024-05-02T12:17:22.971298413Z caller=http.go:194 level=debug traceID=487e667e4b498c7e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.77727ms" +ts=2024-05-02T12:17:22.970302959Z caller=http.go:194 level=debug traceID=6885d74913b15ef1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.4669ms" +ts=2024-05-02T12:17:22.969498312Z caller=http.go:194 level=debug traceID=26bb05bd853bbacf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.191454ms" +ts=2024-05-02T12:17:22.969132918Z caller=http.go:194 level=debug traceID=3bc90b2eb49bb83d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.899064ms" +ts=2024-05-02T12:17:22.969059857Z caller=http.go:194 level=debug traceID=3caf799568e36fbf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.394477ms" +ts=2024-05-02T12:17:22.96690051Z caller=http.go:194 level=debug traceID=17b6f1068d1da043 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.817709ms" +ts=2024-05-02T12:17:22.963713959Z caller=http.go:194 level=debug traceID=217c5ab2b909b15c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 243.381µs" +ts=2024-05-02T12:17:22.963032309Z caller=http.go:194 level=debug traceID=0af0a73e8caff0ab orgID=3648 msg="POST /push.v1.PusherService/Push (200) 298.208µs" +ts=2024-05-02T12:17:22.962688762Z caller=http.go:194 level=debug traceID=27d2c2378e35d9fc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.123036ms" +ts=2024-05-02T12:17:22.962073855Z caller=http.go:194 level=debug traceID=22e35b0c5fecaa13 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.38058ms" +ts=2024-05-02T12:17:22.961366633Z caller=http.go:194 level=debug traceID=10bbd00ed568b382 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.020768ms" +ts=2024-05-02T12:17:22.959888025Z caller=http.go:194 level=debug traceID=6885d74913b15ef1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.166627ms" +ts=2024-05-02T12:17:22.95804217Z caller=http.go:194 level=debug traceID=3bc90b2eb49bb83d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.82783ms" +ts=2024-05-02T12:17:22.956933633Z caller=http.go:194 level=debug traceID=7129ac0b419ee629 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.061629ms" +ts=2024-05-02T12:17:22.953361726Z caller=http.go:194 level=debug traceID=022aa70b678d20ef orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 1.568793ms" +ts=2024-05-02T12:17:22.952637712Z caller=http.go:194 level=debug traceID=5f5fb21079e19095 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.399426ms" +ts=2024-05-02T12:17:22.951956062Z caller=http.go:194 level=debug traceID=0af0a73e8caff0ab orgID=1218 msg="POST /push.v1.PusherService/Push (200) 416.825µs" +ts=2024-05-02T12:17:22.951812686Z caller=http.go:194 level=debug traceID=217c5ab2b909b15c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 306.357µs" +ts=2024-05-02T12:17:22.95149441Z caller=http.go:194 level=debug traceID=27d2c2378e35d9fc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.109802ms" +ts=2024-05-02T12:17:22.950932189Z caller=http.go:194 level=debug traceID=3cf19ff358d915c9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.323885ms" +ts=2024-05-02T12:17:22.950008422Z caller=http.go:194 level=debug traceID=22e35b0c5fecaa13 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.597151ms" +ts=2024-05-02T12:17:22.949059797Z caller=http.go:194 level=debug traceID=10bbd00ed568b382 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.31207ms" +ts=2024-05-02T12:17:22.945328081Z caller=http.go:194 level=debug traceID=7129ac0b419ee629 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.935948ms" +ts=2024-05-02T12:17:22.944594783Z caller=http.go:194 level=debug traceID=63f9917d8f742c5b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 180.664µs" +ts=2024-05-02T12:17:22.944362279Z caller=http.go:194 level=debug traceID=50f0f45d983cf836 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.876435ms" +ts=2024-05-02T12:17:22.944272864Z caller=http.go:194 level=debug traceID=0e6f4e2f8af3c8d6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.467122ms" +ts=2024-05-02T12:17:22.943954235Z caller=http.go:194 level=debug traceID=11ed2a3b90828d81 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 335.563µs" +ts=2024-05-02T12:17:22.94320595Z caller=http.go:194 level=debug traceID=6450ecb7bb2b4daf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.633722ms" +ts=2024-05-02T12:17:22.943164796Z caller=http.go:194 level=debug traceID=5f5fb21079e19095 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.533114ms" +ts=2024-05-02T12:17:22.942443684Z caller=http.go:194 level=debug traceID=022aa70b678d20ef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.93635ms" +ts=2024-05-02T12:17:22.938876708Z caller=http.go:194 level=debug traceID=2b8735bc598bee2d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.35323ms" +ts=2024-05-02T12:17:22.938652415Z caller=http.go:194 level=debug traceID=3cf19ff358d915c9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.946982ms" +ts=2024-05-02T12:17:22.937837339Z caller=http.go:194 level=debug traceID=1f7b1aeda9607fab orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.369072ms" +ts=2024-05-02T12:17:22.937612099Z caller=http.go:194 level=debug traceID=58fd3b7adc15e58d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.383251ms" +ts=2024-05-02T12:17:22.937367947Z caller=http.go:194 level=debug traceID=3638680e45f93db9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.360569ms" +ts=2024-05-02T12:17:22.934718986Z caller=http.go:194 level=debug traceID=0e6f4e2f8af3c8d6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.911934ms" +ts=2024-05-02T12:17:22.934642995Z caller=http.go:194 level=debug traceID=63f9917d8f742c5b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 295.735µs" +ts=2024-05-02T12:17:22.932899642Z 
caller=http.go:194 level=debug traceID=50f0f45d983cf836 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.190863ms" +ts=2024-05-02T12:17:22.932440352Z caller=http.go:194 level=debug traceID=11ed2a3b90828d81 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 319.77µs" +ts=2024-05-02T12:17:22.931286485Z caller=http.go:194 level=debug traceID=2cb989fa00033342 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.637344ms" +ts=2024-05-02T12:17:22.930492939Z caller=http.go:194 level=debug traceID=6450ecb7bb2b4daf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.828433ms" +ts=2024-05-02T12:17:22.928712738Z caller=http.go:194 level=debug traceID=7826873523280f2e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.945537ms" +ts=2024-05-02T12:17:22.928274674Z caller=http.go:194 level=debug traceID=2b8735bc598bee2d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.057153ms" +ts=2024-05-02T12:17:22.927367594Z caller=http.go:194 level=debug traceID=58fd3b7adc15e58d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.525877ms" +ts=2024-05-02T12:17:22.926454762Z caller=http.go:194 level=debug traceID=3638680e45f93db9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.472386ms" +ts=2024-05-02T12:17:22.925846397Z caller=http.go:194 level=debug traceID=03aa3713f23cfe2f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.409509ms" +ts=2024-05-02T12:17:22.925315173Z caller=http.go:194 level=debug traceID=48f6e3a123c9a4a1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 363.072µs" +ts=2024-05-02T12:17:22.925084614Z caller=http.go:194 level=debug traceID=3c659183769d659b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 332.2µs" +ts=2024-05-02T12:17:22.923003366Z caller=http.go:194 level=debug traceID=4c455072ea237b19 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.68123ms" +ts=2024-05-02T12:17:22.921551665Z caller=http.go:194 level=debug traceID=1f7b1aeda9607fab orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.564057ms" +ts=2024-05-02T12:17:22.921540524Z caller=http.go:194 level=debug traceID=5f60fc0fd6e77168 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.018642ms" +ts=2024-05-02T12:17:22.920792563Z caller=http.go:194 level=debug traceID=2cb989fa00033342 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.270956ms" +ts=2024-05-02T12:17:22.920906425Z caller=http.go:194 level=debug traceID=2aa09fd200af7730 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.618951ms" +ts=2024-05-02T12:17:22.919084265Z caller=http.go:194 level=debug traceID=1a2af541965b36d2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.817713ms" +ts=2024-05-02T12:17:22.918980904Z caller=http.go:194 level=debug traceID=509668cbab71d57d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.398856ms" +ts=2024-05-02T12:17:22.918226533Z caller=http.go:194 level=debug traceID=1f0e4aa340429bef orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.681652ms" +ts=2024-05-02T12:17:22.918128894Z caller=http.go:194 level=debug traceID=0bc450b66f5d023c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.458733ms" +ts=2024-05-02T12:17:22.918058136Z caller=http.go:194 level=debug traceID=7826873523280f2e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.91817ms" +ts=2024-05-02T12:17:22.917777103Z caller=http.go:194 level=debug traceID=792dbc8b8fbab5a6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.955442ms" +ts=2024-05-02T12:17:22.914842681Z caller=http.go:194 level=debug traceID=633fc5ea1fb684a8 orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 326.455µs" +ts=2024-05-02T12:17:22.913758562Z caller=http.go:194 level=debug traceID=3c659183769d659b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 258.09µs" +ts=2024-05-02T12:17:22.913397196Z caller=http.go:194 level=debug traceID=03aa3713f23cfe2f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.698178ms" +ts=2024-05-02T12:17:22.913227607Z caller=http.go:194 level=debug traceID=48f6e3a123c9a4a1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 359.431µs" +ts=2024-05-02T12:17:22.913091521Z caller=http.go:194 level=debug traceID=4c455072ea237b19 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.763899ms" +ts=2024-05-02T12:17:22.911476417Z caller=http.go:194 level=debug traceID=06316a77c4040ff9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.551498ms" +ts=2024-05-02T12:17:22.910956091Z caller=http.go:194 level=debug traceID=5f60fc0fd6e77168 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.314808ms" +ts=2024-05-02T12:17:22.909622447Z caller=http.go:194 level=debug traceID=2aa09fd200af7730 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.789059ms" +ts=2024-05-02T12:17:22.908860782Z caller=http.go:194 level=debug traceID=6844637cbbb7f44b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.270911ms" +ts=2024-05-02T12:17:22.908762042Z caller=http.go:194 level=debug traceID=1a2af541965b36d2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.426692ms" +ts=2024-05-02T12:17:22.907057387Z caller=http.go:194 level=debug traceID=792dbc8b8fbab5a6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.506024ms" +ts=2024-05-02T12:17:22.907081414Z caller=http.go:194 level=debug traceID=0bc450b66f5d023c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.583802ms" +ts=2024-05-02T12:17:22.906197057Z caller=http.go:194 level=debug traceID=1f0e4aa340429bef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.043855ms" +ts=2024-05-02T12:17:22.905957517Z caller=http.go:194 level=debug traceID=509668cbab71d57d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.567911ms" +ts=2024-05-02T12:17:22.905509558Z caller=http.go:194 level=debug traceID=0042fc15894eb3af orgID=3648 msg="POST /push.v1.PusherService/Push (400) 168.691µs" +ts=2024-05-02T12:17:22.904701229Z caller=http.go:194 level=debug traceID=633fc5ea1fb684a8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 375.725µs" +ts=2024-05-02T12:17:22.904598325Z caller=http.go:194 level=debug traceID=768f1bf4dc2fe587 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.991321ms" +ts=2024-05-02T12:17:22.904479768Z caller=http.go:194 level=debug traceID=57c56cb15db302d6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 327.806µs" +ts=2024-05-02T12:17:22.903922415Z caller=http.go:194 level=debug traceID=0d86eb19fdafdca6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.143218ms" +ts=2024-05-02T12:17:22.90385921Z caller=http.go:194 level=debug traceID=3c63750660b25f82 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.242198ms" +ts=2024-05-02T12:17:22.901622005Z caller=http.go:194 level=debug traceID=06316a77c4040ff9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.602739ms" +ts=2024-05-02T12:17:22.898002855Z caller=http.go:194 level=debug traceID=6844637cbbb7f44b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.254145ms" +ts=2024-05-02T12:17:22.895901333Z caller=http.go:194 level=debug traceID=485db3ae7d39aef5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.607578ms" +ts=2024-05-02T12:17:22.895314669Z 
caller=http.go:194 level=debug traceID=2fa95f595c30c7c8 orgID=1819 msg="POST /pyroscope/ingest?aggregationType=sum&from=1714652230&name=flamegraph.com.frontend%7Bapp_kubernetes_io_instance%3Dflamegraph-com%2Capp_kubernetes_io_name%3Dflamegraph-com%2Ccluster%3Dflamegraph.com%2Cinstance%3D10.0.9.115%3A9091%2Cjob%3Dkubernetes-pods%2Cnamespace%3Dflamegraph-com%2Cpod%3Dflamegraph-com-frontend-6fb87f8785-pd87k%2Cpod_template_hash%3D6fb87f8785%2Cpyroscope_tenant%3Dpyroscope%2Ctier%3Dfrontend%7D&sampleRate=0&spyName=scrape&units=samples&until=1714652240 (200) 4.017327ms" +ts=2024-05-02T12:17:22.895112942Z caller=http.go:194 level=debug traceID=768f1bf4dc2fe587 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.154234ms" +ts=2024-05-02T12:17:22.894646925Z caller=http.go:194 level=debug traceID=61e707c14f148470 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.971599ms" +ts=2024-05-02T12:17:22.894374167Z caller=http.go:194 level=debug traceID=0042fc15894eb3af orgID=1218 msg="POST /push.v1.PusherService/Push (400) 225.49µs" +ts=2024-05-02T12:17:22.892999171Z caller=http.go:194 level=debug traceID=57c56cb15db302d6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 292.842µs" +ts=2024-05-02T12:17:22.892045732Z caller=http.go:194 level=debug traceID=3c63750660b25f82 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.665968ms" +ts=2024-05-02T12:17:22.891538242Z caller=http.go:194 level=debug traceID=42972a3362398fc1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.244244ms" +ts=2024-05-02T12:17:22.887221091Z caller=http.go:194 level=debug traceID=0d86eb19fdafdca6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.68253ms" +ts=2024-05-02T12:17:22.886677851Z caller=http.go:194 level=debug traceID=5116f3a8fbbb201d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.826727ms" +ts=2024-05-02T12:17:22.886075616Z caller=http.go:194 level=debug traceID=485db3ae7d39aef5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.905133ms" +ts=2024-05-02T12:17:22.882859977Z caller=http.go:194 level=debug traceID=61e707c14f148470 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.197662ms" +ts=2024-05-02T12:17:22.881900386Z caller=http.go:194 level=debug traceID=269741a56ee85b9d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.161133ms" +ts=2024-05-02T12:17:22.880773095Z caller=http.go:194 level=debug traceID=42972a3362398fc1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.060657ms" +ts=2024-05-02T12:17:22.879538529Z caller=http.go:194 level=debug traceID=6c6d49d6a9af538b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.340063ms" +ts=2024-05-02T12:17:22.878949841Z caller=http.go:194 level=debug traceID=5116f3a8fbbb201d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.150384ms" +ts=2024-05-02T12:17:22.876774969Z caller=http.go:194 level=debug traceID=0fefce7ac7a4c601 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.629028ms" +ts=2024-05-02T12:17:22.871696418Z caller=http.go:194 level=debug traceID=3c4a1341e4cf8e4b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.974289ms" +ts=2024-05-02T12:17:22.871610359Z caller=http.go:194 level=debug traceID=59c02e41a442d94e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.399293ms" +ts=2024-05-02T12:17:22.871262824Z caller=http.go:194 level=debug traceID=536168e4f49cea82 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.125047ms" +ts=2024-05-02T12:17:22.870044219Z caller=http.go:194 level=debug traceID=269741a56ee85b9d orgID=1218 msg="POST /push.v1.PusherService/Push 
(200) 2.212746ms" +ts=2024-05-02T12:17:22.869062686Z caller=http.go:194 level=debug traceID=433f2cb6a0764d28 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.170926ms" +ts=2024-05-02T12:17:22.868230899Z caller=http.go:194 level=debug traceID=6c6d49d6a9af538b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.402223ms" +ts=2024-05-02T12:17:22.866431109Z caller=http.go:194 level=debug traceID=77882b5268c9c12c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.800441ms" +ts=2024-05-02T12:17:22.865509445Z caller=http.go:194 level=debug traceID=0fefce7ac7a4c601 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.752036ms" +ts=2024-05-02T12:17:22.863664301Z caller=http.go:194 level=debug traceID=6dcfc7ee2c237f20 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.720722ms" +ts=2024-05-02T12:17:22.863519931Z caller=http.go:194 level=debug traceID=78184834e6c1ddee orgID=3648 msg="POST /push.v1.PusherService/Push (200) 265.773µs" +ts=2024-05-02T12:17:22.861536084Z caller=http.go:194 level=debug traceID=7355d55293969ec2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.937834ms" +ts=2024-05-02T12:17:22.861202851Z caller=http.go:194 level=debug traceID=59c02e41a442d94e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.343672ms" +ts=2024-05-02T12:17:22.860976407Z caller=http.go:194 level=debug traceID=4fe00c7694a5c590 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.036104ms" +ts=2024-05-02T12:17:22.860231683Z caller=http.go:194 level=debug traceID=536168e4f49cea82 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.677409ms" +ts=2024-05-02T12:17:22.859812322Z caller=http.go:194 level=debug traceID=433f2cb6a0764d28 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.526638ms" +ts=2024-05-02T12:17:22.858379953Z caller=http.go:194 level=debug traceID=0f35483ee5f810de orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.687028ms" +ts=2024-05-02T12:17:22.857688341Z caller=http.go:194 level=debug traceID=3c4a1341e4cf8e4b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.781304ms" +ts=2024-05-02T12:17:22.855912327Z caller=http.go:194 level=debug traceID=309e2a48e4b464a9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.102377ms" +ts=2024-05-02T12:17:22.855554673Z caller=http.go:194 level=debug traceID=77882b5268c9c12c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.12872ms" +ts=2024-05-02T12:17:22.855427364Z caller=http.go:194 level=debug traceID=742a6b9ce9ac4d0c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.929926ms" +ts=2024-05-02T12:17:22.853989592Z caller=http.go:194 level=debug traceID=0f35483ee5f810de orgID=1218 msg="POST /push.v1.PusherService/Push (200) 22.622635ms" +ts=2024-05-02T12:17:22.852727758Z caller=http.go:194 level=debug traceID=78184834e6c1ddee orgID=1218 msg="POST /push.v1.PusherService/Push (200) 876.149µs" +ts=2024-05-02T12:17:22.851228301Z caller=http.go:194 level=debug traceID=1e1fe5ba1756bc38 orgID=1819 msg="POST /pyroscope/ingest?aggregationType=sum&from=1714652230&name=flamegraph.com%7Bapp_kubernetes_io_instance%3Dflamegraph-com%2Capp_kubernetes_io_name%3Dflamegraph-com%2Ccluster%3Dflamegraph.com%2Cinstance%3D10.0.11.146%3A8001%2Cjob%3Dkubernetes-pods%2Cnamespace%3Dflamegraph-com%2Cpod%3Dflamegraph-com-backend-79c858c7bf-jw2hn%2Cpod_template_hash%3D79c858c7bf%2Cpyroscope_tenant%3Dpyroscope%2Ctier%3Dbackend%7D&sampleRate=0&spyName=scrape&units=samples&until=1714652240 (200) 22.345191ms" +ts=2024-05-02T12:17:22.850321367Z caller=http.go:194 level=debug traceID=4fe00c7694a5c590 
orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.27631ms" +ts=2024-05-02T12:17:22.850116029Z caller=http.go:194 level=debug traceID=74ba70e20d1884b5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.776772ms" +ts=2024-05-02T12:17:22.850081195Z caller=http.go:194 level=debug traceID=6dcfc7ee2c237f20 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.977548ms" +ts=2024-05-02T12:17:22.849814387Z caller=http.go:194 level=debug traceID=6330806e8af7bf5b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 626.244µs" +ts=2024-05-02T12:17:22.849320149Z caller=http.go:194 level=debug traceID=73fd3d4fb4799796 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.476869ms" +ts=2024-05-02T12:17:22.846560075Z caller=http.go:194 level=debug traceID=7355d55293969ec2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.722008ms" +ts=2024-05-02T12:17:22.846180055Z caller=http.go:194 level=debug traceID=742a6b9ce9ac4d0c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.748015ms" +ts=2024-05-02T12:17:22.846004885Z caller=http.go:194 level=debug traceID=5de85a71ebb8ba1e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.826907ms" +ts=2024-05-02T12:17:22.844878412Z caller=http.go:194 level=debug traceID=74df5d525672562f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.478183ms" +ts=2024-05-02T12:17:22.842817398Z caller=http.go:194 level=debug traceID=309e2a48e4b464a9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.298569ms" +ts=2024-05-02T12:17:22.840566193Z caller=http.go:194 level=debug traceID=73fd3d4fb4799796 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.201439ms" +ts=2024-05-02T12:17:22.84071127Z caller=http.go:194 level=debug traceID=2bcc3ddffb8d28c1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.927996ms" +ts=2024-05-02T12:17:22.840153457Z caller=http.go:194 level=debug traceID=32bb307107ebca45 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 48.931149ms" +ts=2024-05-02T12:17:22.839982096Z caller=http.go:194 level=debug traceID=69e660dad539adf0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.954303ms" +ts=2024-05-02T12:17:22.838320859Z caller=http.go:194 level=debug traceID=74ba70e20d1884b5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.992278ms" +ts=2024-05-02T12:17:22.837338671Z caller=http.go:194 level=debug traceID=751f551f3865f809 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.659659ms" +ts=2024-05-02T12:17:22.836771904Z caller=http.go:194 level=debug traceID=6330806e8af7bf5b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 317.004µs" +ts=2024-05-02T12:17:22.836474534Z caller=http.go:194 level=debug traceID=59966c8ef4991091 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.520601ms" +ts=2024-05-02T12:17:22.834807507Z caller=http.go:194 level=debug traceID=5de85a71ebb8ba1e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.709157ms" +ts=2024-05-02T12:17:22.833644525Z caller=http.go:194 level=debug traceID=74df5d525672562f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.312315ms" +ts=2024-05-02T12:17:22.830605596Z caller=http.go:194 level=debug traceID=2bcc3ddffb8d28c1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.046698ms" +ts=2024-05-02T12:17:22.829652776Z caller=http.go:194 level=debug traceID=491898f5b41751a1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.878431ms" +ts=2024-05-02T12:17:22.82800214Z caller=http.go:194 level=debug traceID=69e660dad539adf0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.898125ms" 
+ts=2024-05-02T12:17:22.826929155Z caller=http.go:194 level=debug traceID=714f7433f48d2c5b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.191935ms" +ts=2024-05-02T12:17:22.826411145Z caller=http.go:194 level=debug traceID=751f551f3865f809 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.59514ms" +ts=2024-05-02T12:17:22.825291021Z caller=http.go:194 level=debug traceID=59966c8ef4991091 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.602838ms" +ts=2024-05-02T12:17:22.823871218Z caller=http.go:194 level=debug traceID=68cb825f0faf7457 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.200152ms" +ts=2024-05-02T12:17:22.823101913Z caller=http.go:194 level=debug traceID=48c9e1d9bfc76f24 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.950368ms" +ts=2024-05-02T12:17:22.822247863Z caller=http.go:194 level=debug traceID=5d463813440beceb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.766503ms" +ts=2024-05-02T12:17:22.821833842Z caller=http.go:194 level=debug traceID=386a337f57af2cf0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.123305ms" +ts=2024-05-02T12:17:22.819995079Z caller=http.go:194 level=debug traceID=674fbc8e38dc0ee2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.771584ms" +ts=2024-05-02T12:17:22.819547223Z caller=http.go:194 level=debug traceID=73e98e2755286ee7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.72582ms" +ts=2024-05-02T12:17:22.818036134Z caller=http.go:194 level=debug traceID=491898f5b41751a1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.455917ms" +ts=2024-05-02T12:17:22.817737901Z caller=http.go:194 level=debug traceID=04fdde92511d8928 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.580578ms" +ts=2024-05-02T12:17:22.817593715Z caller=http.go:194 level=debug traceID=3d19e23e16cc0b14 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.253999ms" +ts=2024-05-02T12:17:22.81638089Z caller=http.go:194 level=debug traceID=355f00537da43e10 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 196.956µs" +ts=2024-05-02T12:17:22.815990889Z caller=http.go:194 level=debug traceID=714f7433f48d2c5b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.399146ms" +ts=2024-05-02T12:17:22.814976625Z caller=http.go:194 level=debug traceID=261a64b7f47a7c36 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.337512ms" +ts=2024-05-02T12:17:22.813328286Z caller=http.go:194 level=debug traceID=1efe14fd10396369 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.988243ms" +ts=2024-05-02T12:17:22.812045384Z caller=http.go:194 level=debug traceID=48c9e1d9bfc76f24 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.23973ms" +ts=2024-05-02T12:17:22.811609542Z caller=http.go:194 level=debug traceID=541f4f6caad7e1cc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.21861ms" +ts=2024-05-02T12:17:22.811612293Z caller=http.go:194 level=debug traceID=68cb825f0faf7457 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.764267ms" +ts=2024-05-02T12:17:22.811512123Z caller=http.go:194 level=debug traceID=29a80b78d3ccaff5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.469192ms" +ts=2024-05-02T12:17:22.811106196Z caller=http.go:194 level=debug traceID=77ba7cbd08be13a7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.396903ms" +ts=2024-05-02T12:17:22.810729523Z caller=http.go:194 level=debug traceID=5d463813440beceb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.068722ms" +ts=2024-05-02T12:17:22.810729658Z caller=http.go:194 level=debug 
traceID=386a337f57af2cf0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.380958ms" +ts=2024-05-02T12:17:22.810015509Z caller=http.go:194 level=debug traceID=7777b7997c9ad6d2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 365.348µs" +ts=2024-05-02T12:17:22.808047989Z caller=http.go:194 level=debug traceID=04fdde92511d8928 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.766963ms" +ts=2024-05-02T12:17:22.807822875Z caller=http.go:194 level=debug traceID=736cd6972f6f3264 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 376.416µs" +ts=2024-05-02T12:17:22.807708973Z caller=http.go:194 level=debug traceID=73e98e2755286ee7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.629059ms" +ts=2024-05-02T12:17:22.807708781Z caller=http.go:194 level=debug traceID=674fbc8e38dc0ee2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.914993ms" +ts=2024-05-02T12:17:22.807578178Z caller=http.go:194 level=debug traceID=3d19e23e16cc0b14 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.570045ms" +ts=2024-05-02T12:17:22.806888859Z caller=http.go:194 level=debug traceID=797335dee337829f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.978865ms" +ts=2024-05-02T12:17:22.80571244Z caller=http.go:194 level=debug traceID=1d71edaf5a306140 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.956623ms" +ts=2024-05-02T12:17:22.805566023Z caller=http.go:194 level=debug traceID=355f00537da43e10 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 426.123µs" +ts=2024-05-02T12:17:22.804542163Z caller=http.go:194 level=debug traceID=30695a06b6798045 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 256.301µs" +ts=2024-05-02T12:17:22.803675068Z caller=http.go:194 level=debug traceID=5cf1c12a2331a47d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.442553ms" +ts=2024-05-02T12:17:22.80279473Z caller=http.go:194 level=debug traceID=261a64b7f47a7c36 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.740879ms" +ts=2024-05-02T12:17:22.802232746Z caller=http.go:194 level=debug traceID=1efe14fd10396369 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.866755ms" +ts=2024-05-02T12:17:22.800253974Z caller=http.go:194 level=debug traceID=29a80b78d3ccaff5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.642835ms" +ts=2024-05-02T12:17:22.80007704Z caller=http.go:194 level=debug traceID=541f4f6caad7e1cc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.914506ms" +ts=2024-05-02T12:17:22.799066064Z caller=http.go:194 level=debug traceID=1b755cf1cb82aa27 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 308.941µs" +ts=2024-05-02T12:17:22.798520729Z caller=http.go:194 level=debug traceID=7777b7997c9ad6d2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 391.636µs" +ts=2024-05-02T12:17:22.798398181Z caller=http.go:194 level=debug traceID=32bb307107ebca45 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 18.341258ms" +ts=2024-05-02T12:17:22.798324785Z caller=http.go:194 level=debug traceID=77ba7cbd08be13a7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.828169ms" +ts=2024-05-02T12:17:22.79779293Z caller=http.go:194 level=debug traceID=6ee63996b197e065 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.756065ms" +ts=2024-05-02T12:17:22.796900784Z caller=http.go:194 level=debug traceID=6d0aa3ce0c04a7b4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 41.409622ms" +ts=2024-05-02T12:17:22.796689743Z caller=http.go:194 level=debug traceID=736cd6972f6f3264 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
441.623µs" +ts=2024-05-02T12:17:22.796441059Z caller=http.go:194 level=debug traceID=0402bafcb42d82d3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.824679ms" +ts=2024-05-02T12:17:22.796424323Z caller=http.go:194 level=debug traceID=797335dee337829f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.892923ms" +ts=2024-05-02T12:17:22.796230324Z caller=http.go:194 level=debug traceID=68287545108a0020 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.790864ms" +ts=2024-05-02T12:17:22.795191727Z caller=http.go:194 level=debug traceID=1d71edaf5a306140 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.08804ms" +ts=2024-05-02T12:17:22.794219007Z caller=http.go:194 level=debug traceID=7bfe40516e42c00c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.561571ms" +ts=2024-05-02T12:17:22.79324554Z caller=http.go:194 level=debug traceID=30695a06b6798045 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 388.028µs" +ts=2024-05-02T12:17:22.793271284Z caller=http.go:194 level=debug traceID=07f43a3484ee0551 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.865767ms" +ts=2024-05-02T12:17:22.792293141Z caller=http.go:194 level=debug traceID=5cf1c12a2331a47d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.31242ms" +ts=2024-05-02T12:17:22.789633903Z caller=http.go:194 level=debug traceID=4fe7ea5678a917f1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.799019ms" +ts=2024-05-02T12:17:22.788888581Z caller=http.go:194 level=debug traceID=6a746df4d0e74c3f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.232847ms" +ts=2024-05-02T12:17:22.788851739Z caller=http.go:194 level=debug traceID=1287ca80e59a4c0d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.733456ms" +ts=2024-05-02T12:17:22.787799095Z caller=http.go:194 level=debug traceID=194b9f3c6677ef10 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.889052ms" +ts=2024-05-02T12:17:22.787534621Z caller=http.go:194 level=debug traceID=1b755cf1cb82aa27 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 295.53µs" +ts=2024-05-02T12:17:22.787312058Z caller=http.go:194 level=debug traceID=6ee63996b197e065 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.494206ms" +ts=2024-05-02T12:17:22.785031737Z caller=http.go:194 level=debug traceID=0402bafcb42d82d3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.992371ms" +ts=2024-05-02T12:17:22.784879255Z caller=http.go:194 level=debug traceID=3b4b498f000a6dff orgID=3648 msg="POST /push.v1.PusherService/Push (200) 227.349µs" +ts=2024-05-02T12:17:22.784273694Z caller=http.go:194 level=debug traceID=7bfe40516e42c00c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.613251ms" +ts=2024-05-02T12:17:22.783697473Z caller=http.go:194 level=debug traceID=68287545108a0020 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.277354ms" +ts=2024-05-02T12:17:22.782019714Z caller=http.go:194 level=debug traceID=07f43a3484ee0551 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.729647ms" +ts=2024-05-02T12:17:22.78185473Z caller=http.go:194 level=debug traceID=16c5532fc15bc504 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.532372ms" +ts=2024-05-02T12:17:22.780695729Z caller=http.go:194 level=debug traceID=401aa1272895d3aa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.191062ms" +ts=2024-05-02T12:17:22.779770812Z caller=http.go:194 level=debug traceID=4950face542436fb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.892387ms" +ts=2024-05-02T12:17:22.779174198Z caller=http.go:194 level=debug 
traceID=797dcdb388a22130 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 45.503857ms" +ts=2024-05-02T12:17:22.778862203Z caller=http.go:194 level=debug traceID=4fe7ea5678a917f1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.319821ms" +ts=2024-05-02T12:17:22.778024373Z caller=http.go:194 level=debug traceID=6a746df4d0e74c3f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.650458ms" +ts=2024-05-02T12:17:22.777683275Z caller=http.go:194 level=debug traceID=1287ca80e59a4c0d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.909713ms" +ts=2024-05-02T12:17:22.777094564Z caller=http.go:194 level=debug traceID=194b9f3c6677ef10 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.418684ms" +ts=2024-05-02T12:17:22.776562607Z caller=http.go:194 level=debug traceID=41f14befccdcd9ec orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.023881ms" +ts=2024-05-02T12:17:22.77566592Z caller=http.go:194 level=debug traceID=2b9f0ed81fe8c47f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 367.26µs" +ts=2024-05-02T12:17:22.774825354Z caller=http.go:194 level=debug traceID=3b4b498f000a6dff orgID=1218 msg="POST /push.v1.PusherService/Push (200) 389.523µs" +ts=2024-05-02T12:17:22.774289589Z caller=http.go:194 level=debug traceID=650bfbfa51e69b56 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.166859ms" +ts=2024-05-02T12:17:22.772915948Z caller=http.go:194 level=debug traceID=211745141030fcab orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.771854ms" +ts=2024-05-02T12:17:22.771471728Z caller=http.go:194 level=debug traceID=0c16c41258823210 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.216079ms" +ts=2024-05-02T12:17:22.770181629Z caller=http.go:194 level=debug traceID=401aa1272895d3aa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.994703ms" +ts=2024-05-02T12:17:22.769571594Z caller=http.go:194 level=debug traceID=16c5532fc15bc504 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.43555ms" +ts=2024-05-02T12:17:22.768404021Z caller=http.go:194 level=debug traceID=4950face542436fb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.797129ms" +ts=2024-05-02T12:17:22.767941932Z caller=http.go:194 level=debug traceID=2fb4d94fab9c97c3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.651505ms" +ts=2024-05-02T12:17:22.767796542Z caller=http.go:194 level=debug traceID=71568703ba22bf7f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.904201ms" +ts=2024-05-02T12:17:22.767779451Z caller=http.go:194 level=debug traceID=07a0ee25928d73de orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.271947ms" +ts=2024-05-02T12:17:22.767691328Z caller=http.go:194 level=debug traceID=21c8bfeb29d1c9b1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.536291ms" +ts=2024-05-02T12:17:22.766097904Z caller=http.go:194 level=debug traceID=18de87b4a9fa962e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.176372ms" +ts=2024-05-02T12:17:22.765374609Z caller=http.go:194 level=debug traceID=284c2139c59a8896 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.268144ms" +ts=2024-05-02T12:17:22.765282016Z caller=http.go:194 level=debug traceID=492b53b2f7695060 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.102447ms" +ts=2024-05-02T12:17:22.765015635Z caller=http.go:194 level=debug traceID=7c98a9c09801a9f0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.481033ms" +ts=2024-05-02T12:17:22.764118908Z caller=http.go:194 level=debug traceID=41f14befccdcd9ec orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
7.962112ms" +ts=2024-05-02T12:17:22.763889623Z caller=http.go:194 level=debug traceID=2b9f0ed81fe8c47f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 330.276µs" +ts=2024-05-02T12:17:22.763406548Z caller=http.go:194 level=debug traceID=79025a1b67bd808b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 325.881µs" +ts=2024-05-02T12:17:22.763217268Z caller=http.go:194 level=debug traceID=650bfbfa51e69b56 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.208813ms" +ts=2024-05-02T12:17:22.762570749Z caller=http.go:194 level=debug traceID=2639c9267f4a55d6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.018747ms" +ts=2024-05-02T12:17:22.761835012Z caller=http.go:194 level=debug traceID=211745141030fcab orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.698703ms" +ts=2024-05-02T12:17:22.761239921Z caller=http.go:194 level=debug traceID=11bdf937b293635d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.401801ms" +ts=2024-05-02T12:17:22.761112122Z caller=http.go:194 level=debug traceID=4eea02a27b58953f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.721364ms" +ts=2024-05-02T12:17:22.761042384Z caller=http.go:194 level=debug traceID=6d0aa3ce0c04a7b4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.750391ms" +ts=2024-05-02T12:17:22.761069982Z caller=http.go:194 level=debug traceID=18348716a16270c9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.445866ms" +ts=2024-05-02T12:17:22.759702159Z caller=http.go:194 level=debug traceID=0c16c41258823210 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.910426ms" +ts=2024-05-02T12:17:22.759437062Z caller=http.go:194 level=debug traceID=1a65c01e8e25398e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.255108ms" +ts=2024-05-02T12:17:22.75847356Z caller=http.go:194 level=debug traceID=2dae0f19f61fc981 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.632471ms" +ts=2024-05-02T12:17:22.757706166Z caller=http.go:194 level=debug traceID=21c8bfeb29d1c9b1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.703831ms" +ts=2024-05-02T12:17:22.757062438Z caller=http.go:194 level=debug traceID=07a0ee25928d73de orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.148025ms" +ts=2024-05-02T12:17:22.756852738Z caller=http.go:194 level=debug traceID=65cc41a21ddcfb9d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.182149ms" +ts=2024-05-02T12:17:22.756613251Z caller=http.go:194 level=debug traceID=2fb4d94fab9c97c3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.473439ms" +ts=2024-05-02T12:17:22.756450797Z caller=http.go:194 level=debug traceID=64103d15ac594857 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.741838ms" +ts=2024-05-02T12:17:22.755963369Z caller=http.go:194 level=debug traceID=71568703ba22bf7f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.432392ms" +ts=2024-05-02T12:17:22.755592631Z caller=http.go:194 level=debug traceID=284c2139c59a8896 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.139251ms" +ts=2024-05-02T12:17:22.7548657Z caller=http.go:194 level=debug traceID=18de87b4a9fa962e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.420988ms" +ts=2024-05-02T12:17:22.754057926Z caller=http.go:194 level=debug traceID=7d922644b7e5f755 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.119329ms" +ts=2024-05-02T12:17:22.753602259Z caller=http.go:194 level=debug traceID=58896c6528d47af0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.845589ms" +ts=2024-05-02T12:17:22.753086755Z caller=http.go:194 level=debug 
traceID=7c98a9c09801a9f0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.813315ms" +ts=2024-05-02T12:17:22.75298773Z caller=http.go:194 level=debug traceID=0a4cb1827d8d6997 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.305299ms" +ts=2024-05-02T12:17:22.752480489Z caller=http.go:194 level=debug traceID=492b53b2f7695060 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.176041ms" +ts=2024-05-02T12:17:22.752206791Z caller=http.go:194 level=debug traceID=79025a1b67bd808b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 397.467µs" +ts=2024-05-02T12:17:22.752065565Z caller=http.go:194 level=debug traceID=2639c9267f4a55d6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.707945ms" +ts=2024-05-02T12:17:22.750826919Z caller=http.go:194 level=debug traceID=074fbdd7eb5733d2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.435182ms" +ts=2024-05-02T12:17:22.750275801Z caller=http.go:194 level=debug traceID=11bdf937b293635d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.069831ms" +ts=2024-05-02T12:17:22.750241484Z caller=http.go:194 level=debug traceID=4eea02a27b58953f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.949595ms" +ts=2024-05-02T12:17:22.749928584Z caller=http.go:194 level=debug traceID=18348716a16270c9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.482514ms" +ts=2024-05-02T12:17:22.749380408Z caller=http.go:194 level=debug traceID=1d1b86dd92c00bff orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.613133ms" +ts=2024-05-02T12:17:22.749123004Z caller=http.go:194 level=debug traceID=1a65c01e8e25398e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.975291ms" +ts=2024-05-02T12:17:22.747505134Z caller=http.go:194 level=debug traceID=2dae0f19f61fc981 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.833902ms" +ts=2024-05-02T12:17:22.747074353Z caller=http.go:194 level=debug traceID=65cc41a21ddcfb9d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 22.237511ms" +ts=2024-05-02T12:17:22.745919065Z caller=http.go:194 level=debug traceID=312d741f380a0e18 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.100193ms" +ts=2024-05-02T12:17:22.745699619Z caller=http.go:194 level=debug traceID=269ce34542c3d913 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.438813ms" +ts=2024-05-02T12:17:22.745592057Z caller=http.go:194 level=debug traceID=6960a91560d6816c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.772806ms" +ts=2024-05-02T12:17:22.745026112Z caller=http.go:194 level=debug traceID=64103d15ac594857 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.489086ms" +ts=2024-05-02T12:17:22.743597469Z caller=http.go:194 level=debug traceID=54d208ac086d4c0a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.796984ms" +ts=2024-05-02T12:17:22.742419052Z caller=http.go:194 level=debug traceID=4cf003a556b4e288 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.40806ms" +ts=2024-05-02T12:17:22.742390768Z caller=http.go:194 level=debug traceID=7d922644b7e5f755 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.898528ms" +ts=2024-05-02T12:17:22.742255783Z caller=http.go:194 level=debug traceID=0a4cb1827d8d6997 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.039818ms" +ts=2024-05-02T12:17:22.741950285Z caller=http.go:194 level=debug traceID=58896c6528d47af0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.83605ms" +ts=2024-05-02T12:17:22.741937359Z caller=http.go:194 level=debug traceID=700a3c98a98cb4e4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
3.646479ms" +ts=2024-05-02T12:17:22.741077968Z caller=http.go:194 level=debug traceID=5108a8d290c90b2f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.66061ms" +ts=2024-05-02T12:17:22.739707067Z caller=http.go:194 level=debug traceID=074fbdd7eb5733d2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.43764ms" +ts=2024-05-02T12:17:22.73906288Z caller=http.go:194 level=debug traceID=797dcdb388a22130 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 18.548892ms" +ts=2024-05-02T12:17:22.738614463Z caller=http.go:194 level=debug traceID=07254f7c943eed69 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.385168ms" +ts=2024-05-02T12:17:22.737875384Z caller=http.go:194 level=debug traceID=169fdf72252a2524 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.601799ms" +ts=2024-05-02T12:17:22.737343539Z caller=http.go:194 level=debug traceID=1d1b86dd92c00bff orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.045737ms" +ts=2024-05-02T12:17:22.736983205Z caller=http.go:194 level=debug traceID=66debae0bfb51c1b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.552953ms" +ts=2024-05-02T12:17:22.736907231Z caller=http.go:194 level=debug traceID=56b76eafac60583e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.568107ms" +ts=2024-05-02T12:17:22.734886534Z caller=http.go:194 level=debug traceID=269ce34542c3d913 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.174488ms" +ts=2024-05-02T12:17:22.734897594Z caller=http.go:194 level=debug traceID=6960a91560d6816c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.091285ms" +ts=2024-05-02T12:17:22.734438393Z caller=http.go:194 level=debug traceID=54d208ac086d4c0a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.611943ms" +ts=2024-05-02T12:17:22.734397275Z caller=http.go:194 level=debug traceID=312d741f380a0e18 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.855388ms" +ts=2024-05-02T12:17:22.734436856Z caller=http.go:194 level=debug traceID=7693fde3aba24ba3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.99071ms" +ts=2024-05-02T12:17:22.734303263Z caller=http.go:194 level=debug traceID=6cbb50d64d6b0a68 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.949026ms" +ts=2024-05-02T12:17:22.734153222Z caller=http.go:194 level=debug traceID=4f73b2753207bd0d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.130484ms" +ts=2024-05-02T12:17:22.731753471Z caller=http.go:194 level=debug traceID=700a3c98a98cb4e4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.505093ms" +ts=2024-05-02T12:17:22.731459762Z caller=http.go:194 level=debug traceID=624b4a84745f43a8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.516209ms" +ts=2024-05-02T12:17:22.730875953Z caller=http.go:194 level=debug traceID=4cf003a556b4e288 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.262182ms" +ts=2024-05-02T12:17:22.729256945Z caller=http.go:194 level=debug traceID=5108a8d290c90b2f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.580371ms" +ts=2024-05-02T12:17:22.729248563Z caller=http.go:194 level=debug traceID=7fb318dd865ab78f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.544694ms" +ts=2024-05-02T12:17:22.728777481Z caller=http.go:194 level=debug traceID=1901b56ab23b8759 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.815937ms" +ts=2024-05-02T12:17:22.728005973Z caller=http.go:194 level=debug traceID=2a514399f3c35a5a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.797726ms" +ts=2024-05-02T12:17:22.727611102Z caller=http.go:194 level=debug 
traceID=07254f7c943eed69 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.700745ms" +ts=2024-05-02T12:17:22.726753966Z caller=http.go:194 level=debug traceID=56b76eafac60583e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.458434ms" +ts=2024-05-02T12:17:22.726566014Z caller=http.go:194 level=debug traceID=169fdf72252a2524 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.529861ms" +ts=2024-05-02T12:17:22.726402772Z caller=http.go:194 level=debug traceID=66debae0bfb51c1b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.478644ms" +ts=2024-05-02T12:17:22.724740481Z caller=http.go:194 level=debug traceID=51d5136752f8e18a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.639418ms" +ts=2024-05-02T12:17:22.724138617Z caller=http.go:194 level=debug traceID=6cbb50d64d6b0a68 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.93692ms" +ts=2024-05-02T12:17:22.723773362Z caller=http.go:194 level=debug traceID=4f73b2753207bd0d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.792337ms" +ts=2024-05-02T12:17:22.722184221Z caller=http.go:194 level=debug traceID=4334c96dd45c393d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.135595ms" +ts=2024-05-02T12:17:22.722037419Z caller=http.go:194 level=debug traceID=7693fde3aba24ba3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.819392ms" +ts=2024-05-02T12:17:22.722062433Z caller=http.go:194 level=debug traceID=3faa8f6769a4db79 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.909803ms" +ts=2024-05-02T12:17:22.721410404Z caller=http.go:194 level=debug traceID=624b4a84745f43a8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.757157ms" +ts=2024-05-02T12:17:22.720011981Z caller=http.go:194 level=debug traceID=4ee8314e874e757f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.30063ms" +ts=2024-05-02T12:17:22.719526652Z caller=http.go:194 level=debug traceID=5688cb8ada612b63 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.300555ms" +ts=2024-05-02T12:17:22.719269953Z caller=http.go:194 level=debug traceID=7fb318dd865ab78f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.60978ms" +ts=2024-05-02T12:17:22.718022908Z caller=http.go:194 level=debug traceID=0a079e7592fb074d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.280041ms" +ts=2024-05-02T12:17:22.718048672Z caller=http.go:194 level=debug traceID=6d9a1fbdd83d4e8d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.098422ms" +ts=2024-05-02T12:17:22.717876813Z caller=http.go:194 level=debug traceID=1901b56ab23b8759 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.954541ms" +ts=2024-05-02T12:17:22.716955783Z caller=http.go:194 level=debug traceID=5273ee239763f8be orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.893674ms" +ts=2024-05-02T12:17:22.714500934Z caller=http.go:194 level=debug traceID=190440465b48104b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.254892ms" +ts=2024-05-02T12:17:22.714249723Z caller=http.go:194 level=debug traceID=2a514399f3c35a5a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.522063ms" +ts=2024-05-02T12:17:22.714071598Z caller=http.go:194 level=debug traceID=70866ae5c854689d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.93656ms" +ts=2024-05-02T12:17:22.713893909Z caller=http.go:194 level=debug traceID=51d5136752f8e18a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.67987ms" +ts=2024-05-02T12:17:22.713259366Z caller=http.go:194 level=debug traceID=08dc44a2798b8125 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
6.287785ms" +ts=2024-05-02T12:17:22.713053969Z caller=http.go:194 level=debug traceID=704a41d80e023728 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.215519ms" +ts=2024-05-02T12:17:22.712426167Z caller=http.go:194 level=debug traceID=3faa8f6769a4db79 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.429506ms" +ts=2024-05-02T12:17:22.711722535Z caller=http.go:194 level=debug traceID=4032ef4e5fe0bfb7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.005951ms" +ts=2024-05-02T12:17:22.71164956Z caller=http.go:194 level=debug traceID=547028bcb9ea8e9e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.512485ms" +ts=2024-05-02T12:17:22.710497903Z caller=http.go:194 level=debug traceID=73595a3dafbd1a86 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.419465ms" +ts=2024-05-02T12:17:22.710453251Z caller=http.go:194 level=debug traceID=2d6268726e12daac orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.475019ms" +ts=2024-05-02T12:17:22.709683829Z caller=http.go:194 level=debug traceID=4334c96dd45c393d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.532728ms" +ts=2024-05-02T12:17:22.709101666Z caller=http.go:194 level=debug traceID=4ee8314e874e757f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.353534ms" +ts=2024-05-02T12:17:22.708802786Z caller=http.go:194 level=debug traceID=53cb91f44cc77504 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.092948ms" +ts=2024-05-02T12:17:22.708716923Z caller=http.go:194 level=debug traceID=2d10b8705ede1670 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.194608ms" +ts=2024-05-02T12:17:22.708564071Z caller=http.go:194 level=debug traceID=5688cb8ada612b63 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.699342ms" +ts=2024-05-02T12:17:22.707182869Z caller=http.go:194 level=debug traceID=5273ee239763f8be orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.918941ms" +ts=2024-05-02T12:17:22.707110774Z caller=http.go:194 level=debug traceID=7da519f92feb8b24 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.296435ms" +ts=2024-05-02T12:17:22.707085071Z caller=http.go:194 level=debug traceID=7b1ff8bc375c4405 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.92943ms" +ts=2024-05-02T12:17:22.706717555Z caller=http.go:194 level=debug traceID=6d9a1fbdd83d4e8d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.8308ms" +ts=2024-05-02T12:17:22.706290659Z caller=http.go:194 level=debug traceID=0a079e7592fb074d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.927944ms" +ts=2024-05-02T12:17:22.705269332Z caller=http.go:194 level=debug traceID=2aec8c2db718b249 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.115865ms" +ts=2024-05-02T12:17:22.705255984Z caller=http.go:194 level=debug traceID=7d3e4c2b447533bf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.515747ms" +ts=2024-05-02T12:17:22.704191833Z caller=http.go:194 level=debug traceID=190440465b48104b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.422995ms" +ts=2024-05-02T12:17:22.703241002Z caller=http.go:194 level=debug traceID=29f7a8815e67ce82 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.976478ms" +ts=2024-05-02T12:17:22.702819977Z caller=http.go:194 level=debug traceID=09d7f6e264e2c7fb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.318501ms" +ts=2024-05-02T12:17:22.702728745Z caller=http.go:194 level=debug traceID=08aadfcdc88a679a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 379.55µs" +ts=2024-05-02T12:17:22.702546471Z caller=http.go:194 level=debug 
traceID=08dc44a2798b8125 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.572013ms" +ts=2024-05-02T12:17:22.702505992Z caller=http.go:194 level=debug traceID=70866ae5c854689d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.610466ms" +ts=2024-05-02T12:17:22.7024074Z caller=http.go:194 level=debug traceID=3e2892f6209117fa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.233539ms" +ts=2024-05-02T12:17:22.701938069Z caller=http.go:194 level=debug traceID=23e3e758a97d9111 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.320008ms" +ts=2024-05-02T12:17:22.7014005Z caller=http.go:194 level=debug traceID=48297741622a6303 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.823949ms" +ts=2024-05-02T12:17:22.701107276Z caller=http.go:194 level=debug traceID=704a41d80e023728 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.921456ms" +ts=2024-05-02T12:17:22.70093501Z caller=http.go:194 level=debug traceID=547028bcb9ea8e9e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.858696ms" +ts=2024-05-02T12:17:22.700812867Z caller=http.go:194 level=debug traceID=12b934c6c5560d62 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.812724ms" +ts=2024-05-02T12:17:22.699424366Z caller=http.go:194 level=debug traceID=4032ef4e5fe0bfb7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.726532ms" +ts=2024-05-02T12:17:22.699408246Z caller=http.go:194 level=debug traceID=2d6268726e12daac orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.69295ms" +ts=2024-05-02T12:17:22.698696685Z caller=http.go:194 level=debug traceID=53cb91f44cc77504 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.663182ms" +ts=2024-05-02T12:17:22.69831426Z caller=http.go:194 level=debug traceID=2d10b8705ede1670 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.730247ms" +ts=2024-05-02T12:17:22.698227188Z caller=http.go:194 level=debug traceID=73595a3dafbd1a86 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.834681ms" +ts=2024-05-02T12:17:22.696263394Z caller=http.go:194 level=debug traceID=7d3e4c2b447533bf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.400626ms" +ts=2024-05-02T12:17:22.695785868Z caller=http.go:194 level=debug traceID=450060f87e51dd37 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.495986ms" +ts=2024-05-02T12:17:22.695415877Z caller=http.go:194 level=debug traceID=5744239febab0a0f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.758892ms" +ts=2024-05-02T12:17:22.695327238Z caller=http.go:194 level=debug traceID=4a6647a1324aaf53 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.255972ms" +ts=2024-05-02T12:17:22.695173419Z caller=http.go:194 level=debug traceID=7b1ff8bc375c4405 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.302194ms" +ts=2024-05-02T12:17:22.695018432Z caller=http.go:194 level=debug traceID=5679f6dc160ce4c3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.118715ms" +ts=2024-05-02T12:17:22.695029157Z caller=http.go:194 level=debug traceID=2aec8c2db718b249 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.977242ms" +ts=2024-05-02T12:17:22.694266627Z caller=http.go:194 level=debug traceID=7da519f92feb8b24 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.113615ms" +ts=2024-05-02T12:17:22.692990917Z caller=http.go:194 level=debug traceID=00f26d25ed3f1ed0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.498086ms" +ts=2024-05-02T12:17:22.692178455Z caller=http.go:194 level=debug traceID=6dfd65fdbd14481c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
7.411132ms" +ts=2024-05-02T12:17:22.691988641Z caller=http.go:194 level=debug traceID=29f7a8815e67ce82 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.873522ms" +ts=2024-05-02T12:17:22.691819101Z caller=http.go:194 level=debug traceID=08aadfcdc88a679a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 525.846µs" +ts=2024-05-02T12:17:22.691341564Z caller=http.go:194 level=debug traceID=7785e4a69427b244 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.383838ms" +ts=2024-05-02T12:17:22.691063744Z caller=http.go:194 level=debug traceID=09d7f6e264e2c7fb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.098528ms" +ts=2024-05-02T12:17:22.690911239Z caller=http.go:194 level=debug traceID=3e2892f6209117fa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.796975ms" +ts=2024-05-02T12:17:22.690272377Z caller=http.go:194 level=debug traceID=23e3e758a97d9111 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.945593ms" +ts=2024-05-02T12:17:22.690152442Z caller=http.go:194 level=debug traceID=48297741622a6303 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.784662ms" +ts=2024-05-02T12:17:22.690037502Z caller=http.go:194 level=debug traceID=54f1276a8e4bddf1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 15.566121ms" +ts=2024-05-02T12:17:22.688876907Z caller=http.go:194 level=debug traceID=12b934c6c5560d62 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.62633ms" +ts=2024-05-02T12:17:22.688606005Z caller=http.go:194 level=debug traceID=1d7ca9fdb64a0dc0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.865685ms" +ts=2024-05-02T12:17:22.687714083Z caller=http.go:194 level=debug traceID=0d016e3b898803af orgID=3648 msg="POST /push.v1.PusherService/Push (200) 358.032µs" +ts=2024-05-02T12:17:22.687370808Z caller=http.go:194 level=debug traceID=6077ca14dffa0a55 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.036981ms" +ts=2024-05-02T12:17:22.686354979Z caller=http.go:194 level=debug traceID=242e4d356d986b7f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.96641ms" +ts=2024-05-02T12:17:22.685824673Z caller=http.go:194 level=debug traceID=2906c587cfddc9ce orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.537766ms" +ts=2024-05-02T12:17:22.685338721Z caller=http.go:194 level=debug traceID=450060f87e51dd37 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.695ms" +ts=2024-05-02T12:17:22.684713019Z caller=http.go:194 level=debug traceID=6dfd65fdbd14481c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.748956ms" +ts=2024-05-02T12:17:22.684611162Z caller=http.go:194 level=debug traceID=19ca0d0163dc4bc3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.994583ms" +ts=2024-05-02T12:17:22.684145891Z caller=http.go:194 level=debug traceID=5744239febab0a0f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.320968ms" +ts=2024-05-02T12:17:22.684076192Z caller=http.go:194 level=debug traceID=0510e33517e080a8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.277545ms" +ts=2024-05-02T12:17:22.683505217Z caller=http.go:194 level=debug traceID=5824a7be1c4c2863 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.619346ms" +ts=2024-05-02T12:17:22.683222707Z caller=http.go:194 level=debug traceID=5dd93386b8556cee orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.444164ms" +ts=2024-05-02T12:17:22.683113907Z caller=http.go:194 level=debug traceID=4a6647a1324aaf53 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.750963ms" +ts=2024-05-02T12:17:22.682653796Z caller=http.go:194 level=debug 
traceID=5679f6dc160ce4c3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.693564ms" +ts=2024-05-02T12:17:22.682454762Z caller=http.go:194 level=debug traceID=00f26d25ed3f1ed0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.22848ms" +ts=2024-05-02T12:17:22.682081336Z caller=http.go:194 level=debug traceID=68b8b7f034fd32e3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.830381ms" +ts=2024-05-02T12:17:22.681903131Z caller=http.go:194 level=debug traceID=7785e4a69427b244 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.614995ms" +ts=2024-05-02T12:17:22.681151728Z caller=http.go:194 level=debug traceID=28e2d6801438f452 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.90125ms" +ts=2024-05-02T12:17:22.680521149Z caller=http.go:194 level=debug traceID=4df3df0734d95365 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.993118ms" +ts=2024-05-02T12:17:22.678517151Z caller=http.go:194 level=debug traceID=05806096ee9a72bd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.217048ms" +ts=2024-05-02T12:17:22.678162467Z caller=http.go:194 level=debug traceID=78e0de05fb32b24d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.874825ms" +ts=2024-05-02T12:17:22.676745184Z caller=http.go:194 level=debug traceID=0d016e3b898803af orgID=1218 msg="POST /push.v1.PusherService/Push (200) 442.445µs" +ts=2024-05-02T12:17:22.676294365Z caller=http.go:194 level=debug traceID=760105fef8d037c1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 255.711µs" +ts=2024-05-02T12:17:22.675357482Z caller=http.go:194 level=debug traceID=54f1276a8e4bddf1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.298341ms" +ts=2024-05-02T12:17:22.675009813Z caller=http.go:194 level=debug traceID=242e4d356d986b7f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.7598ms" +ts=2024-05-02T12:17:22.674778052Z caller=http.go:194 level=debug traceID=6077ca14dffa0a55 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.43272ms" +ts=2024-05-02T12:17:22.67463273Z caller=http.go:194 level=debug traceID=1d7ca9fdb64a0dc0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.516406ms" +ts=2024-05-02T12:17:22.674400363Z caller=http.go:194 level=debug traceID=33ed5272ab024293 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.083578ms" +ts=2024-05-02T12:17:22.673946631Z caller=http.go:194 level=debug traceID=57294f0a21e19d5f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.482348ms" +ts=2024-05-02T12:17:22.673641985Z caller=http.go:194 level=debug traceID=2906c587cfddc9ce orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.793866ms" +ts=2024-05-02T12:17:22.673081726Z caller=http.go:194 level=debug traceID=19ca0d0163dc4bc3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.842521ms" +ts=2024-05-02T12:17:22.672082203Z caller=http.go:194 level=debug traceID=6eed2930f6d77a81 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.652695ms" +ts=2024-05-02T12:17:22.671933423Z caller=http.go:194 level=debug traceID=0510e33517e080a8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.675892ms" +ts=2024-05-02T12:17:22.671859031Z caller=http.go:194 level=debug traceID=4bb1d080fc934886 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.906806ms" +ts=2024-05-02T12:17:22.671486335Z caller=http.go:194 level=debug traceID=5dd93386b8556cee orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.958257ms" +ts=2024-05-02T12:17:22.671382445Z caller=http.go:194 level=debug traceID=68b8b7f034fd32e3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
6.10328ms" +ts=2024-05-02T12:17:22.671305245Z caller=http.go:194 level=debug traceID=5824a7be1c4c2863 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.216008ms" +ts=2024-05-02T12:17:22.67056561Z caller=http.go:194 level=debug traceID=4df3df0734d95365 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.208064ms" +ts=2024-05-02T12:17:22.669848269Z caller=http.go:194 level=debug traceID=24b233adf14416af orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.884171ms" +ts=2024-05-02T12:17:22.669584196Z caller=http.go:194 level=debug traceID=59d45a8652862d3b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.409188ms" +ts=2024-05-02T12:17:22.66766096Z caller=http.go:194 level=debug traceID=05806096ee9a72bd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.683545ms" +ts=2024-05-02T12:17:22.667305677Z caller=http.go:194 level=debug traceID=28e2d6801438f452 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.4426ms" +ts=2024-05-02T12:17:22.667296549Z caller=http.go:194 level=debug traceID=78e0de05fb32b24d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.137427ms" +ts=2024-05-02T12:17:22.666294696Z caller=http.go:194 level=debug traceID=760105fef8d037c1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 387.877µs" +ts=2024-05-02T12:17:22.665781409Z caller=http.go:194 level=debug traceID=205fc25e84ea8aa8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.604484ms" +ts=2024-05-02T12:17:22.665192951Z caller=http.go:194 level=debug traceID=57294f0a21e19d5f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.490351ms" +ts=2024-05-02T12:17:22.66405237Z caller=http.go:194 level=debug traceID=19b4b5549a1a244e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.566203ms" +ts=2024-05-02T12:17:22.663553021Z caller=http.go:194 level=debug traceID=7b82daafecc7555d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.129165ms" +ts=2024-05-02T12:17:22.663354066Z caller=http.go:194 level=debug traceID=7b94f8a4f209828f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.163427ms" +ts=2024-05-02T12:17:22.66243496Z caller=http.go:194 level=debug traceID=33ed5272ab024293 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.493837ms" +ts=2024-05-02T12:17:22.660986536Z caller=http.go:194 level=debug traceID=1b32e9b6e4377ea0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.964043ms" +ts=2024-05-02T12:17:22.660383183Z caller=http.go:194 level=debug traceID=4bb1d080fc934886 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.503071ms" +ts=2024-05-02T12:17:22.659899813Z caller=http.go:194 level=debug traceID=6eed2930f6d77a81 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.369395ms" +ts=2024-05-02T12:17:22.659847805Z caller=http.go:194 level=debug traceID=391acc520a12b425 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.892497ms" +ts=2024-05-02T12:17:22.659588593Z caller=http.go:194 level=debug traceID=24b233adf14416af orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.770641ms" +ts=2024-05-02T12:17:22.65944889Z caller=http.go:194 level=debug traceID=7f80f9233df969f4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.170138ms" +ts=2024-05-02T12:17:22.657854001Z caller=http.go:194 level=debug traceID=466780b547377328 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.202461ms" +ts=2024-05-02T12:17:22.657582037Z caller=http.go:194 level=debug traceID=59d45a8652862d3b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.853876ms" +ts=2024-05-02T12:17:22.656187024Z caller=http.go:194 level=debug 
traceID=1a2e4b1bef2a43d6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 275.583µs" +ts=2024-05-02T12:17:22.655696254Z caller=http.go:194 level=debug traceID=0bc84103d1ccf308 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.210165ms" +ts=2024-05-02T12:17:22.655358378Z caller=http.go:194 level=debug traceID=788c083b5784cd94 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.445714ms" +ts=2024-05-02T12:17:22.654571724Z caller=http.go:194 level=debug traceID=5db9cd672ceabc11 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.24565ms" +ts=2024-05-02T12:17:22.654251296Z caller=http.go:194 level=debug traceID=73a58c13e22c8b05 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.207169ms" +ts=2024-05-02T12:17:22.654291478Z caller=http.go:194 level=debug traceID=23d6efe934c9ad0c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.919343ms" +ts=2024-05-02T12:17:22.654070299Z caller=http.go:194 level=debug traceID=205fc25e84ea8aa8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.947048ms" +ts=2024-05-02T12:17:22.65331758Z caller=http.go:194 level=debug traceID=7b94f8a4f209828f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.880524ms" +ts=2024-05-02T12:17:22.653177307Z caller=http.go:194 level=debug traceID=19b4b5549a1a244e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.59058ms" +ts=2024-05-02T12:17:22.651824459Z caller=http.go:194 level=debug traceID=29a811110cfa0ff4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 341.574µs" +ts=2024-05-02T12:17:22.650598454Z caller=http.go:194 level=debug traceID=7b82daafecc7555d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.753412ms" +ts=2024-05-02T12:17:22.649749176Z caller=http.go:194 level=debug traceID=1b32e9b6e4377ea0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.73735ms" +ts=2024-05-02T12:17:22.648177432Z caller=http.go:194 level=debug traceID=2772f60ed1e1d550 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.737471ms" +ts=2024-05-02T12:17:22.64806125Z caller=http.go:194 level=debug traceID=7f80f9233df969f4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.837489ms" +ts=2024-05-02T12:17:22.647924137Z caller=http.go:194 level=debug traceID=689331b884dee140 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.159577ms" +ts=2024-05-02T12:17:22.647217678Z caller=http.go:194 level=debug traceID=1355533e3ea44ad6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.671524ms" +ts=2024-05-02T12:17:22.646940154Z caller=http.go:194 level=debug traceID=31af6d4be8818283 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.593339ms" +ts=2024-05-02T12:17:22.646561992Z caller=http.go:194 level=debug traceID=391acc520a12b425 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.218472ms" +ts=2024-05-02T12:17:22.646421632Z caller=http.go:194 level=debug traceID=466780b547377328 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.038473ms" +ts=2024-05-02T12:17:22.646240446Z caller=http.go:194 level=debug traceID=641aec0194d464f3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.424835ms" +ts=2024-05-02T12:17:22.646081405Z caller=http.go:194 level=debug traceID=1a2e4b1bef2a43d6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 290.047µs" +ts=2024-05-02T12:17:22.645380634Z caller=http.go:194 level=debug traceID=788c083b5784cd94 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.259162ms" +ts=2024-05-02T12:17:22.64509293Z caller=http.go:194 level=debug traceID=3214c81b0200c0f4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
1.399706ms" +ts=2024-05-02T12:17:22.645014527Z caller=http.go:194 level=debug traceID=134b22acadf19dd3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.152803ms" +ts=2024-05-02T12:17:22.644874276Z caller=http.go:194 level=debug traceID=4b8403b843280d43 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.892162ms" +ts=2024-05-02T12:17:22.644712985Z caller=http.go:194 level=debug traceID=605c4f66f4a54c09 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.417618ms" +ts=2024-05-02T12:17:22.644440033Z caller=http.go:194 level=debug traceID=5db9cd672ceabc11 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.833571ms" +ts=2024-05-02T12:17:22.64433272Z caller=http.go:194 level=debug traceID=0bc84103d1ccf308 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.068033ms" +ts=2024-05-02T12:17:22.642523525Z caller=http.go:194 level=debug traceID=73a58c13e22c8b05 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.576219ms" +ts=2024-05-02T12:17:22.642381131Z caller=http.go:194 level=debug traceID=11ab734448ea1655 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.675422ms" +ts=2024-05-02T12:17:22.642420823Z caller=http.go:194 level=debug traceID=23d6efe934c9ad0c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.647617ms" +ts=2024-05-02T12:17:22.640938628Z caller=http.go:194 level=debug traceID=29a811110cfa0ff4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 564.957µs" +ts=2024-05-02T12:17:22.640899008Z caller=http.go:194 level=debug traceID=67bee94dabb3fe44 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.946865ms" +ts=2024-05-02T12:17:22.640421648Z caller=http.go:194 level=debug traceID=58ac411184be0b24 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.851639ms" +ts=2024-05-02T12:17:22.639643444Z caller=http.go:194 level=debug traceID=19953b5c1d4d006b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.667064ms" +ts=2024-05-02T12:17:22.638270084Z caller=http.go:194 level=debug traceID=2772f60ed1e1d550 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.817222ms" +ts=2024-05-02T12:17:22.638115845Z caller=http.go:194 level=debug traceID=60efabe5309a1589 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.738375ms" +ts=2024-05-02T12:17:22.637501257Z caller=http.go:194 level=debug traceID=689331b884dee140 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.033113ms" +ts=2024-05-02T12:17:22.637356633Z caller=http.go:194 level=debug traceID=78384007ae1e717b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.341203ms" +ts=2024-05-02T12:17:22.637114818Z caller=http.go:194 level=debug traceID=61e6f290cfd0b663 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.752003ms" +ts=2024-05-02T12:17:22.63590195Z caller=http.go:194 level=debug traceID=1355533e3ea44ad6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.428842ms" +ts=2024-05-02T12:17:22.635548214Z caller=http.go:194 level=debug traceID=641aec0194d464f3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 12.212343ms" +ts=2024-05-02T12:17:22.635527747Z caller=http.go:194 level=debug traceID=31af6d4be8818283 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.894388ms" +ts=2024-05-02T12:17:22.635332368Z caller=http.go:194 level=debug traceID=134b22acadf19dd3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.773288ms" +ts=2024-05-02T12:17:22.634273004Z caller=http.go:194 level=debug traceID=168b7d9e6c4c1b0e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.974ms" +ts=2024-05-02T12:17:22.634150576Z caller=http.go:194 level=debug 
traceID=605c4f66f4a54c09 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.929553ms" +ts=2024-05-02T12:17:22.633919678Z caller=http.go:194 level=debug traceID=3214c81b0200c0f4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.721501ms" +ts=2024-05-02T12:17:22.633800821Z caller=http.go:194 level=debug traceID=1f1e369866ca952b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.178372ms" +ts=2024-05-02T12:17:22.633652385Z caller=http.go:194 level=debug traceID=4a7ea22d9fbee66b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.965935ms" +ts=2024-05-02T12:17:22.633554676Z caller=http.go:194 level=debug traceID=4b8403b843280d43 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.77063ms" +ts=2024-05-02T12:17:22.633164373Z caller=http.go:194 level=debug traceID=6a7541a6f699bb3f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.465769ms" +ts=2024-05-02T12:17:22.630966903Z caller=http.go:194 level=debug traceID=11ab734448ea1655 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.682902ms" +ts=2024-05-02T12:17:22.630840656Z caller=http.go:194 level=debug traceID=5feb62e41d84a248 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.183117ms" +ts=2024-05-02T12:17:22.630550333Z caller=http.go:194 level=debug traceID=67bee94dabb3fe44 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.489214ms" +ts=2024-05-02T12:17:22.630233694Z caller=http.go:194 level=debug traceID=58ac411184be0b24 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.946118ms" +ts=2024-05-02T12:17:22.629911448Z caller=http.go:194 level=debug traceID=5922a6db6902a8d6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.609163ms" +ts=2024-05-02T12:17:22.629510339Z caller=http.go:194 level=debug traceID=5ee7d1614edffaed orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.427979ms" +ts=2024-05-02T12:17:22.62925235Z caller=http.go:194 level=debug traceID=187bfa6c5f628791 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.666475ms" +ts=2024-05-02T12:17:22.628580173Z caller=http.go:194 level=debug traceID=72958bfa80ddeda5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.355029ms" +ts=2024-05-02T12:17:22.628363362Z caller=http.go:194 level=debug traceID=55aec23a82cc6b7b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.874165ms" +ts=2024-05-02T12:17:22.627799601Z caller=http.go:194 level=debug traceID=05af42e4fef37cdc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.177128ms" +ts=2024-05-02T12:17:22.627629212Z caller=http.go:194 level=debug traceID=19953b5c1d4d006b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.15367ms" +ts=2024-05-02T12:17:22.627649251Z caller=http.go:194 level=debug traceID=31ac6d8e58d05c89 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.539118ms" +ts=2024-05-02T12:17:22.626914867Z caller=http.go:194 level=debug traceID=60efabe5309a1589 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.134855ms" +ts=2024-05-02T12:17:22.62651449Z caller=http.go:194 level=debug traceID=576edc62fc955147 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.503694ms" +ts=2024-05-02T12:17:22.626183104Z caller=http.go:194 level=debug traceID=78384007ae1e717b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.429271ms" +ts=2024-05-02T12:17:22.626141632Z caller=http.go:194 level=debug traceID=063c58b26f314d4d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.568837ms" +ts=2024-05-02T12:17:22.62566182Z caller=http.go:194 level=debug traceID=61e6f290cfd0b663 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
4.974536ms" +ts=2024-05-02T12:17:22.625303451Z caller=http.go:194 level=debug traceID=533ff53d59d95447 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 22.762ms" +ts=2024-05-02T12:17:22.625361933Z caller=http.go:194 level=debug traceID=1403fba4505dcc9f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.47837ms" +ts=2024-05-02T12:17:22.625266563Z caller=http.go:194 level=debug traceID=6a7541a6f699bb3f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.539827ms" +ts=2024-05-02T12:17:22.624359616Z caller=http.go:194 level=debug traceID=53da076ea9d61c79 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.244697ms" +ts=2024-05-02T12:17:22.623737767Z caller=http.go:194 level=debug traceID=168b7d9e6c4c1b0e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.761149ms" +ts=2024-05-02T12:17:22.623373891Z caller=http.go:194 level=debug traceID=4a7ea22d9fbee66b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.953055ms" +ts=2024-05-02T12:17:22.623381482Z caller=http.go:194 level=debug traceID=7a73aa34db7e2f70 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.462646ms" +ts=2024-05-02T12:17:22.622673128Z caller=http.go:194 level=debug traceID=1f1e369866ca952b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.655753ms" +ts=2024-05-02T12:17:22.622560617Z caller=http.go:194 level=debug traceID=55c588c669a8f89b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.935895ms" +ts=2024-05-02T12:17:22.621981737Z caller=http.go:194 level=debug traceID=22d052403e953527 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.678318ms" +ts=2024-05-02T12:17:22.620974228Z caller=http.go:194 level=debug traceID=1db28c7cbc3e2628 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.221891ms" +ts=2024-05-02T12:17:22.619528875Z caller=http.go:194 level=debug traceID=3f77185afb96c9f3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.568238ms" +ts=2024-05-02T12:17:22.61901633Z caller=http.go:194 level=debug traceID=5feb62e41d84a248 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.958473ms" +ts=2024-05-02T12:17:22.618495767Z caller=http.go:194 level=debug traceID=7b4f80d1ee25ff1f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.075406ms" +ts=2024-05-02T12:17:22.617929987Z caller=http.go:194 level=debug traceID=5922a6db6902a8d6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.969148ms" +ts=2024-05-02T12:17:22.617893571Z caller=http.go:194 level=debug traceID=187bfa6c5f628791 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.546345ms" +ts=2024-05-02T12:17:22.617760032Z caller=http.go:194 level=debug traceID=05af42e4fef37cdc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.190909ms" +ts=2024-05-02T12:17:22.617189677Z caller=http.go:194 level=debug traceID=5ee7d1614edffaed orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.163423ms" +ts=2024-05-02T12:17:22.617247674Z caller=http.go:194 level=debug traceID=51329773e2214062 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.469136ms" +ts=2024-05-02T12:17:22.616706057Z caller=http.go:194 level=debug traceID=31ac6d8e58d05c89 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.681659ms" +ts=2024-05-02T12:17:22.616500295Z caller=http.go:194 level=debug traceID=72958bfa80ddeda5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.615354ms" +ts=2024-05-02T12:17:22.616315972Z caller=http.go:194 level=debug traceID=55aec23a82cc6b7b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.682798ms" +ts=2024-05-02T12:17:22.615595489Z caller=http.go:194 level=debug 
traceID=576edc62fc955147 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.576049ms" +ts=2024-05-02T12:17:22.61497917Z caller=http.go:194 level=debug traceID=1403fba4505dcc9f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.182921ms" +ts=2024-05-02T12:17:22.614330598Z caller=http.go:194 level=debug traceID=063c58b26f314d4d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.129818ms" +ts=2024-05-02T12:17:22.613144524Z caller=http.go:194 level=debug traceID=0cca6e76e9ada013 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.722892ms" +ts=2024-05-02T12:17:22.612928578Z caller=http.go:194 level=debug traceID=53da076ea9d61c79 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.702027ms" +ts=2024-05-02T12:17:22.61283527Z caller=http.go:194 level=debug traceID=7a73aa34db7e2f70 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.178575ms" +ts=2024-05-02T12:17:22.61196337Z caller=http.go:194 level=debug traceID=34a829de7d2e760d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.477378ms" +ts=2024-05-02T12:17:22.611386472Z caller=http.go:194 level=debug traceID=1db28c7cbc3e2628 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.686033ms" +ts=2024-05-02T12:17:22.611041292Z caller=http.go:194 level=debug traceID=64d3a6067cabecc1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.642597ms" +ts=2024-05-02T12:17:22.61055461Z caller=http.go:194 level=debug traceID=22d052403e953527 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.586038ms" +ts=2024-05-02T12:17:22.610532761Z caller=http.go:194 level=debug traceID=2e1e9dd443b502de orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.797578ms" +ts=2024-05-02T12:17:22.61024174Z caller=http.go:194 level=debug traceID=55c588c669a8f89b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.931875ms" +ts=2024-05-02T12:17:22.610091077Z caller=http.go:194 level=debug traceID=3f77185afb96c9f3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.136412ms" +ts=2024-05-02T12:17:22.609353367Z caller=http.go:194 level=debug traceID=18b392f80e2c6236 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.611868ms" +ts=2024-05-02T12:17:22.608721103Z caller=http.go:194 level=debug traceID=51329773e2214062 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.542966ms" +ts=2024-05-02T12:17:22.608386196Z caller=http.go:194 level=debug traceID=7b4f80d1ee25ff1f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.03468ms" +ts=2024-05-02T12:17:22.608012874Z caller=http.go:194 level=debug traceID=6c806c0b8d34b0a2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 293.697µs" +ts=2024-05-02T12:17:22.607526332Z caller=http.go:194 level=debug traceID=741f6350d1b583a4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.324759ms" +ts=2024-05-02T12:17:22.606935977Z caller=http.go:194 level=debug traceID=11ab937a9c46b092 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.176158ms" +ts=2024-05-02T12:17:22.606938258Z caller=http.go:194 level=debug traceID=533ff53d59d95447 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 15.842041ms" +ts=2024-05-02T12:17:22.604414731Z caller=http.go:194 level=debug traceID=14bc4fe16f4a9891 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.343073ms" +ts=2024-05-02T12:17:22.603889068Z caller=http.go:194 level=debug traceID=31ebb48b7e790d75 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.086534ms" +ts=2024-05-02T12:17:22.603662908Z caller=http.go:194 level=debug traceID=147abed3c0d501fd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
3.126411ms" +ts=2024-05-02T12:17:22.60317544Z caller=http.go:194 level=debug traceID=5d3b6f9b2b920acb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.813872ms" +ts=2024-05-02T12:17:22.602852116Z caller=http.go:194 level=debug traceID=095059f92152480c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.60154ms" +ts=2024-05-02T12:17:22.602847367Z caller=http.go:194 level=debug traceID=064645d7ad0423ea orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.475831ms" +ts=2024-05-02T12:17:22.602080514Z caller=http.go:194 level=debug traceID=34a829de7d2e760d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.179887ms" +ts=2024-05-02T12:17:22.601860569Z caller=http.go:194 level=debug traceID=51106e7db03d4731 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.727242ms" +ts=2024-05-02T12:17:22.601355889Z caller=http.go:194 level=debug traceID=4fca9e5120913ff2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 49.901669ms" +ts=2024-05-02T12:17:22.601183652Z caller=http.go:194 level=debug traceID=2e1e9dd443b502de orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.195353ms" +ts=2024-05-02T12:17:22.600359559Z caller=http.go:194 level=debug traceID=0cca6e76e9ada013 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.839824ms" +ts=2024-05-02T12:17:22.600240939Z caller=http.go:194 level=debug traceID=37c6174c8dd35f48 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.773952ms" +ts=2024-05-02T12:17:22.59994686Z caller=http.go:194 level=debug traceID=3c76aa3b74f1b02e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.637929ms" +ts=2024-05-02T12:17:22.59947906Z caller=http.go:194 level=debug traceID=64d3a6067cabecc1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.018618ms" +ts=2024-05-02T12:17:22.59936756Z caller=http.go:194 level=debug traceID=18b392f80e2c6236 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.523342ms" +ts=2024-05-02T12:17:22.597940464Z caller=http.go:194 level=debug traceID=60256a82fbeaf123 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.013223ms" +ts=2024-05-02T12:17:22.597462471Z caller=http.go:194 level=debug traceID=2f0d8fa7b1404dec orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.744371ms" +ts=2024-05-02T12:17:22.596919692Z caller=http.go:194 level=debug traceID=6c806c0b8d34b0a2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 413.068µs" +ts=2024-05-02T12:17:22.596771981Z caller=http.go:194 level=debug traceID=02df99d2c491527e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.875013ms" +ts=2024-05-02T12:17:22.596747693Z caller=http.go:194 level=debug traceID=11ab937a9c46b092 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.990069ms" +ts=2024-05-02T12:17:22.596380679Z caller=http.go:194 level=debug traceID=741f6350d1b583a4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.73813ms" +ts=2024-05-02T12:17:22.594622184Z caller=http.go:194 level=debug traceID=6c9a69b3027c3f8b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.920234ms" +ts=2024-05-02T12:17:22.594635428Z caller=http.go:194 level=debug traceID=14bc4fe16f4a9891 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.43351ms" +ts=2024-05-02T12:17:22.593595818Z caller=http.go:194 level=debug traceID=5529053d5f2ef12f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.813863ms" +ts=2024-05-02T12:17:22.593280921Z caller=http.go:194 level=debug traceID=795a22b62f0bec7f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.59922ms" +ts=2024-05-02T12:17:22.592778753Z caller=http.go:194 level=debug 
traceID=095059f92152480c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.705969ms" +ts=2024-05-02T12:17:22.592251616Z caller=http.go:194 level=debug traceID=41c43f52d7994a9e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.057804ms" +ts=2024-05-02T12:17:22.592318113Z caller=http.go:194 level=debug traceID=147abed3c0d501fd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.111975ms" +ts=2024-05-02T12:17:22.592019386Z caller=http.go:194 level=debug traceID=31ebb48b7e790d75 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.006675ms" +ts=2024-05-02T12:17:22.591467272Z caller=http.go:194 level=debug traceID=5d3b6f9b2b920acb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.57895ms" +ts=2024-05-02T12:17:22.591214261Z caller=http.go:194 level=debug traceID=60256a82fbeaf123 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.781595ms" +ts=2024-05-02T12:17:22.590703096Z caller=http.go:194 level=debug traceID=6619d727dc0e4411 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 315.5µs" +ts=2024-05-02T12:17:22.590042853Z caller=http.go:194 level=debug traceID=3c76aa3b74f1b02e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.62657ms" +ts=2024-05-02T12:17:22.590081291Z caller=http.go:194 level=debug traceID=51106e7db03d4731 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.746759ms" +ts=2024-05-02T12:17:22.589972489Z caller=http.go:194 level=debug traceID=37c6174c8dd35f48 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.84889ms" +ts=2024-05-02T12:17:22.589752526Z caller=http.go:194 level=debug traceID=064645d7ad0423ea orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.170761ms" +ts=2024-05-02T12:17:22.587592231Z caller=http.go:194 level=debug traceID=0a74af7948f7f499 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.348191ms" +ts=2024-05-02T12:17:22.587375489Z caller=http.go:194 level=debug traceID=08633aff896b8ab0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.897327ms" +ts=2024-05-02T12:17:22.586775578Z caller=http.go:194 level=debug traceID=2f0d8fa7b1404dec orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.22263ms" +ts=2024-05-02T12:17:22.586766935Z caller=http.go:194 level=debug traceID=5e1012e45f3fc9c7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.518531ms" +ts=2024-05-02T12:17:22.585479358Z caller=http.go:194 level=debug traceID=02df99d2c491527e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.841152ms" +ts=2024-05-02T12:17:22.584826489Z caller=http.go:194 level=debug traceID=26aecf1f306b920a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.821682ms" +ts=2024-05-02T12:17:22.584510599Z caller=http.go:194 level=debug traceID=56e6c358925193ba orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.901207ms" +ts=2024-05-02T12:17:22.584198133Z caller=http.go:194 level=debug traceID=795a22b62f0bec7f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.471384ms" +ts=2024-05-02T12:17:22.584057444Z caller=http.go:194 level=debug traceID=217c04ebf24c79a7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.599905ms" +ts=2024-05-02T12:17:22.58327958Z caller=http.go:194 level=debug traceID=5529053d5f2ef12f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.020845ms" +ts=2024-05-02T12:17:22.58290247Z caller=http.go:194 level=debug traceID=043928823d30dbbb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.176211ms" +ts=2024-05-02T12:17:22.582789019Z caller=http.go:194 level=debug traceID=6c9a69b3027c3f8b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
1.964077ms" +ts=2024-05-02T12:17:22.581613692Z caller=http.go:194 level=debug traceID=5b555ccb1119f501 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.432632ms" +ts=2024-05-02T12:17:22.580949854Z caller=http.go:194 level=debug traceID=41c43f52d7994a9e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.985503ms" +ts=2024-05-02T12:17:22.581031284Z caller=http.go:194 level=debug traceID=52eea7060c66beaa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.692678ms" +ts=2024-05-02T12:17:22.580888341Z caller=http.go:194 level=debug traceID=6619d727dc0e4411 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 461.714µs" +ts=2024-05-02T12:17:22.580820456Z caller=http.go:194 level=debug traceID=60b5eeacdcc386ba orgID=1218 msg="POST /push.v1.PusherService/Push (200) 19.450463ms" +ts=2024-05-02T12:17:22.579948109Z caller=http.go:194 level=debug traceID=0b63d84f83e8fa6e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.852446ms" +ts=2024-05-02T12:17:22.57941971Z caller=http.go:194 level=debug traceID=37e06b9e960a49fe orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.634168ms" +ts=2024-05-02T12:17:22.577728839Z caller=http.go:194 level=debug traceID=56597169b08fa767 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.277989ms" +ts=2024-05-02T12:17:22.577575397Z caller=http.go:194 level=debug traceID=34f9c53d285608e8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.802377ms" +ts=2024-05-02T12:17:22.577198273Z caller=http.go:194 level=debug traceID=08633aff896b8ab0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.990748ms" +ts=2024-05-02T12:17:22.576574452Z caller=http.go:194 level=debug traceID=7d539d74ed3ca1f1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.34557ms" +ts=2024-05-02T12:17:22.576147481Z caller=http.go:194 level=debug traceID=60b5eeacdcc386ba orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.314435ms" +ts=2024-05-02T12:17:22.575951174Z caller=http.go:194 level=debug traceID=0a74af7948f7f499 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.055461ms" +ts=2024-05-02T12:17:22.574777705Z caller=http.go:194 level=debug traceID=5e1012e45f3fc9c7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.681206ms" +ts=2024-05-02T12:17:22.57461675Z caller=http.go:194 level=debug traceID=00c113560a12b2b6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.57157ms" +ts=2024-05-02T12:17:22.574391272Z caller=http.go:194 level=debug traceID=73d4fd11ed1f5c42 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.51471ms" +ts=2024-05-02T12:17:22.574254922Z caller=http.go:194 level=debug traceID=56e6c358925193ba orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.840999ms" +ts=2024-05-02T12:17:22.573487305Z caller=http.go:194 level=debug traceID=217c04ebf24c79a7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.06277ms" +ts=2024-05-02T12:17:22.57285463Z caller=http.go:194 level=debug traceID=26aecf1f306b920a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.680096ms" +ts=2024-05-02T12:17:22.571887077Z caller=http.go:194 level=debug traceID=3cc0605b87c8587a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.284162ms" +ts=2024-05-02T12:17:22.570407699Z caller=http.go:194 level=debug traceID=043928823d30dbbb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.537578ms" +ts=2024-05-02T12:17:22.570123896Z caller=http.go:194 level=debug traceID=5b555ccb1119f501 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.806428ms" +ts=2024-05-02T12:17:22.57008623Z caller=http.go:194 level=debug 
traceID=6e311031d6443db3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.214779ms" +ts=2024-05-02T12:17:22.569789963Z caller=http.go:194 level=debug traceID=1d9030c448f3dc47 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.172923ms" +ts=2024-05-02T12:17:22.569334601Z caller=http.go:194 level=debug traceID=52eea7060c66beaa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.430428ms" +ts=2024-05-02T12:17:22.568869729Z caller=http.go:194 level=debug traceID=200df35443478a89 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.440733ms" +ts=2024-05-02T12:17:22.56754244Z caller=http.go:194 level=debug traceID=37e06b9e960a49fe orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.618093ms" +ts=2024-05-02T12:17:22.567400147Z caller=http.go:194 level=debug traceID=31a71f9c450909e1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.055303ms" +ts=2024-05-02T12:17:22.567255411Z caller=http.go:194 level=debug traceID=34f9c53d285608e8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.608603ms" +ts=2024-05-02T12:17:22.566796106Z caller=http.go:194 level=debug traceID=73d202116cc4c7d4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.002463ms" +ts=2024-05-02T12:17:22.56469013Z caller=http.go:194 level=debug traceID=56597169b08fa767 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.6851ms" +ts=2024-05-02T12:17:22.564750808Z caller=http.go:194 level=debug traceID=1b9c1b82edc31cf9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.561839ms" +ts=2024-05-02T12:17:22.564338133Z caller=http.go:194 level=debug traceID=7d539d74ed3ca1f1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.790913ms" +ts=2024-05-02T12:17:22.563924951Z caller=http.go:194 level=debug traceID=14aadde5c9acd0d4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 280.153µs" +ts=2024-05-02T12:17:22.563890081Z caller=http.go:194 level=debug traceID=0b63d84f83e8fa6e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.699938ms" +ts=2024-05-02T12:17:22.563400011Z caller=http.go:194 level=debug traceID=00c113560a12b2b6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.642256ms" +ts=2024-05-02T12:17:22.563361578Z caller=http.go:194 level=debug traceID=73d4fd11ed1f5c42 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.655019ms" +ts=2024-05-02T12:17:22.562280446Z caller=http.go:194 level=debug traceID=515b0c1666a34310 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 50.533975ms" +ts=2024-05-02T12:17:22.561907997Z caller=http.go:194 level=debug traceID=4fca9e5120913ff2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 22.120638ms" +ts=2024-05-02T12:17:22.561480302Z caller=http.go:194 level=debug traceID=2eedb117e83f123d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.539849ms" +ts=2024-05-02T12:17:22.560575117Z caller=http.go:194 level=debug traceID=3cc0605b87c8587a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.262885ms" +ts=2024-05-02T12:17:22.560358611Z caller=http.go:194 level=debug traceID=6e311031d6443db3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.920483ms" +ts=2024-05-02T12:17:22.560282536Z caller=http.go:194 level=debug traceID=1c168bd419bc0706 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.962851ms" +ts=2024-05-02T12:17:22.559881682Z caller=http.go:194 level=debug traceID=76427bbfbba4c754 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.440633ms" +ts=2024-05-02T12:17:22.559731012Z caller=http.go:194 level=debug traceID=200df35443478a89 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
4.304039ms" +ts=2024-05-02T12:17:22.559441958Z caller=http.go:194 level=debug traceID=5b282cb3fdafaaf2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 239.015µs" +ts=2024-05-02T12:17:22.558754086Z caller=http.go:194 level=debug traceID=1d9030c448f3dc47 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.623004ms" +ts=2024-05-02T12:17:22.557529253Z caller=http.go:194 level=debug traceID=6448542869ac2e68 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.209853ms" +ts=2024-05-02T12:17:22.557147062Z caller=http.go:194 level=debug traceID=7f4e6b91b2ac0780 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.476322ms" +ts=2024-05-02T12:17:22.556697698Z caller=http.go:194 level=debug traceID=3066900a2df7f7cb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.628796ms" +ts=2024-05-02T12:17:22.556321593Z caller=http.go:194 level=debug traceID=116c91fc897b4976 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.664481ms" +ts=2024-05-02T12:17:22.556078155Z caller=http.go:194 level=debug traceID=37253b86aedd8b8e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.287166ms" +ts=2024-05-02T12:17:22.555851955Z caller=http.go:194 level=debug traceID=31a71f9c450909e1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.120566ms" +ts=2024-05-02T12:17:22.554808252Z caller=http.go:194 level=debug traceID=73d202116cc4c7d4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.634467ms" +ts=2024-05-02T12:17:22.554765356Z caller=http.go:194 level=debug traceID=1b9c1b82edc31cf9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.841736ms" +ts=2024-05-02T12:17:22.553773107Z caller=http.go:194 level=debug traceID=2905f81d81a9799a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.014695ms" +ts=2024-05-02T12:17:22.553439631Z caller=http.go:194 level=debug traceID=7f4e6b91b2ac0780 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.113824ms" +ts=2024-05-02T12:17:22.552794713Z caller=http.go:194 level=debug traceID=53fbc451276d002b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.02526ms" +ts=2024-05-02T12:17:22.552834751Z caller=http.go:194 level=debug traceID=14aadde5c9acd0d4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 299.596µs" +ts=2024-05-02T12:17:22.552671502Z caller=http.go:194 level=debug traceID=4e9efd6ea7777db6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.131183ms" +ts=2024-05-02T12:17:22.552674854Z caller=http.go:194 level=debug traceID=5d03147d9d19b505 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 260.279µs" +ts=2024-05-02T12:17:22.551378645Z caller=http.go:194 level=debug traceID=2eedb117e83f123d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.450652ms" +ts=2024-05-02T12:17:22.55081635Z caller=http.go:194 level=debug traceID=562913b4c591a781 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 295.092µs" +ts=2024-05-02T12:17:22.54937023Z caller=http.go:194 level=debug traceID=6ecb3bd9df978588 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.350855ms" +ts=2024-05-02T12:17:22.548924629Z caller=http.go:194 level=debug traceID=76427bbfbba4c754 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.7249ms" +ts=2024-05-02T12:17:22.548794502Z caller=http.go:194 level=debug traceID=3702bd28bb9cf0be orgID=1218 msg="POST /push.v1.PusherService/Push (200) 48.445201ms" +ts=2024-05-02T12:17:22.548616747Z caller=http.go:194 level=debug traceID=216412a713c32707 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 20.578348ms" +ts=2024-05-02T12:17:22.548404211Z caller=http.go:194 level=debug 
traceID=49368058ff93b56e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.397113ms" +ts=2024-05-02T12:17:22.548393039Z caller=http.go:194 level=debug traceID=5320310fa6e80adb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.059085ms" +ts=2024-05-02T12:17:22.548323277Z caller=http.go:194 level=debug traceID=1c168bd419bc0706 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.55934ms" +ts=2024-05-02T12:17:22.548119768Z caller=http.go:194 level=debug traceID=5b282cb3fdafaaf2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 393.943µs" +ts=2024-05-02T12:17:22.54773938Z caller=http.go:194 level=debug traceID=2524c48ebaeb265c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.214099ms" +ts=2024-05-02T12:17:22.547152733Z caller=http.go:194 level=debug traceID=6448542869ac2e68 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.854357ms" +ts=2024-05-02T12:17:22.546296528Z caller=http.go:194 level=debug traceID=3066900a2df7f7cb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.424963ms" +ts=2024-05-02T12:17:22.545160051Z caller=http.go:194 level=debug traceID=116c91fc897b4976 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.814917ms" +ts=2024-05-02T12:17:22.545003253Z caller=http.go:194 level=debug traceID=0430c0af9e021a2d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.534052ms" +ts=2024-05-02T12:17:22.544832855Z caller=http.go:194 level=debug traceID=2350e73848bb7756 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.745972ms" +ts=2024-05-02T12:17:22.544505265Z caller=http.go:194 level=debug traceID=0102c80c6b685dc1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.514632ms" +ts=2024-05-02T12:17:22.542835533Z caller=http.go:194 level=debug traceID=5d03147d9d19b505 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 353.311µs" +ts=2024-05-02T12:17:22.542278778Z caller=http.go:194 level=debug traceID=53fbc451276d002b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.548004ms" +ts=2024-05-02T12:17:22.542230246Z caller=http.go:194 level=debug traceID=2905f81d81a9799a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.627232ms" +ts=2024-05-02T12:17:22.541636674Z caller=http.go:194 level=debug traceID=37253b86aedd8b8e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.591604ms" +ts=2024-05-02T12:17:22.541505375Z caller=http.go:194 level=debug traceID=4e9efd6ea7777db6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.52628ms" +ts=2024-05-02T12:17:22.539438485Z caller=http.go:194 level=debug traceID=5320310fa6e80adb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.125455ms" +ts=2024-05-02T12:17:22.539387542Z caller=http.go:194 level=debug traceID=562913b4c591a781 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 341.199µs" +ts=2024-05-02T12:17:22.539283223Z caller=http.go:194 level=debug traceID=216412a713c32707 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.691079ms" +ts=2024-05-02T12:17:22.538546406Z caller=http.go:194 level=debug traceID=35dc59c891dad09e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.254381ms" +ts=2024-05-02T12:17:22.538455908Z caller=http.go:194 level=debug traceID=1e0dc54d273779c6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.778827ms" +ts=2024-05-02T12:17:22.538103464Z caller=http.go:194 level=debug traceID=2ba614dcd06133b5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.43986ms" +ts=2024-05-02T12:17:22.538166466Z caller=http.go:194 level=debug traceID=49368058ff93b56e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
1.790243ms" +ts=2024-05-02T12:17:22.537899729Z caller=http.go:194 level=debug traceID=0fe26d7ca46fc151 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.94905ms" +ts=2024-05-02T12:17:22.537576461Z caller=http.go:194 level=debug traceID=43bf82f1133f92fb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.364271ms" +ts=2024-05-02T12:17:22.537247485Z caller=http.go:194 level=debug traceID=6ecb3bd9df978588 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.989483ms" +ts=2024-05-02T12:17:22.537193879Z caller=http.go:194 level=debug traceID=2524c48ebaeb265c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.067453ms" +ts=2024-05-02T12:17:22.536711203Z caller=http.go:194 level=debug traceID=5b442f045a4a4a8c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.289449ms" +ts=2024-05-02T12:17:22.536578401Z caller=http.go:194 level=debug traceID=4f73d8cd147f7ff7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.743441ms" +ts=2024-05-02T12:17:22.536345062Z caller=http.go:194 level=debug traceID=1ab63ffc70e3ddc9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.607231ms" +ts=2024-05-02T12:17:22.534498772Z caller=http.go:194 level=debug traceID=0430c0af9e021a2d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.001958ms" +ts=2024-05-02T12:17:22.534369361Z caller=http.go:194 level=debug traceID=2350e73848bb7756 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.43005ms" +ts=2024-05-02T12:17:22.533139021Z caller=http.go:194 level=debug traceID=0102c80c6b685dc1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.490406ms" +ts=2024-05-02T12:17:22.532907842Z caller=http.go:194 level=debug traceID=16ff6f41683c5293 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.689241ms" +ts=2024-05-02T12:17:22.532786911Z caller=http.go:194 level=debug traceID=7dcea16fffb7d260 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.742779ms" +ts=2024-05-02T12:17:22.532700332Z caller=http.go:194 level=debug traceID=6cec06ca8686900b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.229485ms" +ts=2024-05-02T12:17:22.531887429Z caller=http.go:194 level=debug traceID=7178be0b824d0581 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.075368ms" +ts=2024-05-02T12:17:22.531245609Z caller=http.go:194 level=debug traceID=1c2aacb5803d9534 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.367119ms" +ts=2024-05-02T12:17:22.53118203Z caller=http.go:194 level=debug traceID=4e72ee2ce3bf0ba3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.247577ms" +ts=2024-05-02T12:17:22.530999016Z caller=http.go:194 level=debug traceID=529e6584aefff8a6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.161134ms" +ts=2024-05-02T12:17:22.530684487Z caller=http.go:194 level=debug traceID=31df2f36d2937da8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.337982ms" +ts=2024-05-02T12:17:22.529684719Z caller=http.go:194 level=debug traceID=7dbfce079451f37a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.477623ms" +ts=2024-05-02T12:17:22.528610317Z caller=http.go:194 level=debug traceID=0e82a44bd0a65e6b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.854044ms" +ts=2024-05-02T12:17:22.528350001Z caller=http.go:194 level=debug traceID=22bae3ef36d88cda orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.178566ms" +ts=2024-05-02T12:17:22.528030994Z caller=http.go:194 level=debug traceID=055bc906f49d6b3e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.733146ms" +ts=2024-05-02T12:17:22.527559998Z caller=http.go:194 level=debug 
traceID=5f7b301305d8f22f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 137.768472ms" +ts=2024-05-02T12:17:22.527173298Z caller=http.go:194 level=debug traceID=101f573aea67e52c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.907888ms" +ts=2024-05-02T12:17:22.52720338Z caller=http.go:194 level=debug traceID=3702bd28bb9cf0be orgID=3648 msg="POST /push.v1.PusherService/Push (200) 19.833766ms" +ts=2024-05-02T12:17:22.527193621Z caller=http.go:194 level=debug traceID=2ba614dcd06133b5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.736095ms" +ts=2024-05-02T12:17:22.526967893Z caller=http.go:194 level=debug traceID=0fe26d7ca46fc151 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.208969ms" +ts=2024-05-02T12:17:22.526859893Z caller=http.go:194 level=debug traceID=4f73d8cd147f7ff7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.324924ms" +ts=2024-05-02T12:17:22.526818775Z caller=http.go:194 level=debug traceID=1e0dc54d273779c6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.864215ms" +ts=2024-05-02T12:17:22.525384444Z caller=http.go:194 level=debug traceID=515b0c1666a34310 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 25.470215ms" +ts=2024-05-02T12:17:22.525290521Z caller=http.go:194 level=debug traceID=5b442f045a4a4a8c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.845612ms" +ts=2024-05-02T12:17:22.525004555Z caller=http.go:194 level=debug traceID=43bf82f1133f92fb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.663647ms" +ts=2024-05-02T12:17:22.524787897Z caller=http.go:194 level=debug traceID=5f0241c539df5809 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.913664ms" +ts=2024-05-02T12:17:22.524098588Z caller=http.go:194 level=debug traceID=1ab63ffc70e3ddc9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.94257ms" +ts=2024-05-02T12:17:22.522177253Z caller=http.go:194 level=debug traceID=35dc59c891dad09e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.302955ms" +ts=2024-05-02T12:17:22.52187127Z caller=http.go:194 level=debug traceID=5bdbaa38dd4e5e8e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.081237ms" +ts=2024-05-02T12:17:22.521899764Z caller=http.go:194 level=debug traceID=69942979fddd02e7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.279831ms" +ts=2024-05-02T12:17:22.521568035Z caller=http.go:194 level=debug traceID=330c051f397441da orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.138591ms" +ts=2024-05-02T12:17:22.520615616Z caller=http.go:194 level=debug traceID=529e6584aefff8a6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.056615ms" +ts=2024-05-02T12:17:22.520502612Z caller=http.go:194 level=debug traceID=21225d69a9ceb6be orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.569597ms" +ts=2024-05-02T12:17:22.52033712Z caller=http.go:194 level=debug traceID=1c2aacb5803d9534 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.579096ms" +ts=2024-05-02T12:17:22.520263944Z caller=http.go:194 level=debug traceID=16ff6f41683c5293 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.312286ms" +ts=2024-05-02T12:17:22.519912351Z caller=http.go:194 level=debug traceID=7dbfce079451f37a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.761748ms" +ts=2024-05-02T12:17:22.519814061Z caller=http.go:194 level=debug traceID=7178be0b824d0581 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.923614ms" +ts=2024-05-02T12:17:22.519611813Z caller=http.go:194 level=debug traceID=6cec06ca8686900b orgID=1218 msg="POST /push.v1.PusherService/Push 
(200) 1.625043ms" +ts=2024-05-02T12:17:22.51960417Z caller=http.go:194 level=debug traceID=79682f57072e0e84 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.23544ms" +ts=2024-05-02T12:17:22.518802359Z caller=http.go:194 level=debug traceID=31df2f36d2937da8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.660686ms" +ts=2024-05-02T12:17:22.51888903Z caller=http.go:194 level=debug traceID=7dcea16fffb7d260 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.596029ms" +ts=2024-05-02T12:17:22.51793095Z caller=http.go:194 level=debug traceID=65af401a7c0e48a0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.09745ms" +ts=2024-05-02T12:17:22.516050773Z caller=http.go:194 level=debug traceID=5f0241c539df5809 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.583955ms" +ts=2024-05-02T12:17:22.515976218Z caller=http.go:194 level=debug traceID=31349812f6da007f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.899212ms" +ts=2024-05-02T12:17:22.515944576Z caller=http.go:194 level=debug traceID=4e72ee2ce3bf0ba3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.233169ms" +ts=2024-05-02T12:17:22.515908657Z caller=http.go:194 level=debug traceID=48d4ff8ed9e0e659 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.859636ms" +ts=2024-05-02T12:17:22.515592711Z caller=http.go:194 level=debug traceID=101f573aea67e52c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.52766ms" +ts=2024-05-02T12:17:22.514246629Z caller=http.go:194 level=debug traceID=0e82a44bd0a65e6b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.212169ms" +ts=2024-05-02T12:17:22.514235623Z caller=http.go:194 level=debug traceID=465035377949feb0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.245673ms" +ts=2024-05-02T12:17:22.514175515Z caller=http.go:194 level=debug traceID=22bae3ef36d88cda orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.28219ms" +ts=2024-05-02T12:17:22.512439483Z caller=http.go:194 level=debug traceID=0844176d5bf1f3e6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.806442ms" +ts=2024-05-02T12:17:22.511624328Z caller=http.go:194 level=debug traceID=32c8cb2ab0e0d1aa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.209817ms" +ts=2024-05-02T12:17:22.511380684Z caller=http.go:194 level=debug traceID=330c051f397441da orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.904722ms" +ts=2024-05-02T12:17:22.511255006Z caller=http.go:194 level=debug traceID=5bdbaa38dd4e5e8e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.713082ms" +ts=2024-05-02T12:17:22.510037382Z caller=http.go:194 level=debug traceID=51a78993b238ce45 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.178658ms" +ts=2024-05-02T12:17:22.509707941Z caller=http.go:194 level=debug traceID=69942979fddd02e7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.84271ms" +ts=2024-05-02T12:17:22.509570006Z caller=http.go:194 level=debug traceID=055bc906f49d6b3e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.755176ms" +ts=2024-05-02T12:17:22.509343499Z caller=http.go:194 level=debug traceID=21225d69a9ceb6be orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.776705ms" +ts=2024-05-02T12:17:22.508994226Z caller=http.go:194 level=debug traceID=79682f57072e0e84 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.641706ms" +ts=2024-05-02T12:17:22.508754212Z caller=http.go:194 level=debug traceID=3942623054f60e7e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.639053ms" +ts=2024-05-02T12:17:22.508571381Z caller=http.go:194 level=debug 
traceID=68e9ed36b209d93d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.35246ms" +ts=2024-05-02T12:17:22.506593415Z caller=http.go:194 level=debug traceID=0a2cc53818122ae4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.286685ms" +ts=2024-05-02T12:17:22.50621711Z caller=http.go:194 level=debug traceID=65af401a7c0e48a0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.795779ms" +ts=2024-05-02T12:17:22.505696348Z caller=http.go:194 level=debug traceID=23e12e6bdd28bbaa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.282438ms" +ts=2024-05-02T12:17:22.505395404Z caller=http.go:194 level=debug traceID=2e79fb3297000f6d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.637344ms" +ts=2024-05-02T12:17:22.504883664Z caller=http.go:194 level=debug traceID=2a5dad2ef2a99987 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 295.092µs" +ts=2024-05-02T12:17:22.504271767Z caller=http.go:194 level=debug traceID=31349812f6da007f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.617608ms" +ts=2024-05-02T12:17:22.504206856Z caller=http.go:194 level=debug traceID=48d4ff8ed9e0e659 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.192141ms" +ts=2024-05-02T12:17:22.504197656Z caller=http.go:194 level=debug traceID=15dd42395d74238f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.091349ms" +ts=2024-05-02T12:17:22.503111843Z caller=http.go:194 level=debug traceID=465035377949feb0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.459134ms" +ts=2024-05-02T12:17:22.502743619Z caller=http.go:194 level=debug traceID=7c6badf50d9cd380 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.293939ms" +ts=2024-05-02T12:17:22.501251215Z caller=http.go:194 level=debug traceID=7015e8b0caa4d217 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.97779ms" +ts=2024-05-02T12:17:22.501210453Z caller=http.go:194 level=debug traceID=4da1786824420093 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.181226ms" +ts=2024-05-02T12:17:22.501182668Z caller=http.go:194 level=debug traceID=0844176d5bf1f3e6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.747568ms" +ts=2024-05-02T12:17:22.500924921Z caller=http.go:194 level=debug traceID=7015e8b0caa4d217 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.920686ms" +ts=2024-05-02T12:17:22.499959667Z caller=http.go:194 level=debug traceID=334da9254440bf34 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.902775ms" +ts=2024-05-02T12:17:22.499806838Z caller=http.go:194 level=debug traceID=51a78993b238ce45 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.865428ms" +ts=2024-05-02T12:17:22.499656336Z caller=http.go:194 level=debug traceID=79b88c6d6af200c2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.413851ms" +ts=2024-05-02T12:17:22.499395727Z caller=http.go:194 level=debug traceID=499496c598b2a61a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.25661ms" +ts=2024-05-02T12:17:22.498336384Z caller=http.go:194 level=debug traceID=32c8cb2ab0e0d1aa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.680883ms" +ts=2024-05-02T12:17:22.498289314Z caller=http.go:194 level=debug traceID=1d0e61840f30e804 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.639138ms" +ts=2024-05-02T12:17:22.497862117Z caller=http.go:194 level=debug traceID=1fd004b44f49ba96 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.448733ms" +ts=2024-05-02T12:17:22.497816453Z caller=http.go:194 level=debug traceID=3942623054f60e7e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
2.008748ms" +ts=2024-05-02T12:17:22.497577945Z caller=http.go:194 level=debug traceID=78e2f11326c669df orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.861134ms" +ts=2024-05-02T12:17:22.49687321Z caller=http.go:194 level=debug traceID=68e9ed36b209d93d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.74622ms" +ts=2024-05-02T12:17:22.496430705Z caller=http.go:194 level=debug traceID=0a2cc53818122ae4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.156445ms" +ts=2024-05-02T12:17:22.494740115Z caller=http.go:194 level=debug traceID=79b88c6d6af200c2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 20.149046ms" +ts=2024-05-02T12:17:22.494780495Z caller=http.go:194 level=debug traceID=6cf5f86f62507af5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 250.265µs" +ts=2024-05-02T12:17:22.494533147Z caller=http.go:194 level=debug traceID=2a5dad2ef2a99987 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 383.172µs" +ts=2024-05-02T12:17:22.494182605Z caller=http.go:194 level=debug traceID=23e12e6bdd28bbaa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.936003ms" +ts=2024-05-02T12:17:22.493717496Z caller=http.go:194 level=debug traceID=296d8eb183ca8668 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.18066ms" +ts=2024-05-02T12:17:22.493615013Z caller=http.go:194 level=debug traceID=2e79fb3297000f6d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.837359ms" +ts=2024-05-02T12:17:22.493547901Z caller=http.go:194 level=debug traceID=15dd42395d74238f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.470193ms" +ts=2024-05-02T12:17:22.49211Z caller=http.go:194 level=debug traceID=7c6badf50d9cd380 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.895918ms" +ts=2024-05-02T12:17:22.492087686Z caller=http.go:194 level=debug traceID=2abdff076163a032 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.372818ms" +ts=2024-05-02T12:17:22.491642387Z caller=http.go:194 level=debug traceID=2c491d4f1453e35a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.832724ms" +ts=2024-05-02T12:17:22.490995051Z caller=http.go:194 level=debug traceID=4be7212b23b90022 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.565166ms" +ts=2024-05-02T12:17:22.490497403Z caller=http.go:194 level=debug traceID=4da1786824420093 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.144157ms" +ts=2024-05-02T12:17:22.489807703Z caller=http.go:194 level=debug traceID=3ed79d752f3b8500 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.154996ms" +ts=2024-05-02T12:17:22.488577041Z caller=http.go:194 level=debug traceID=334da9254440bf34 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.686783ms" +ts=2024-05-02T12:17:22.488267655Z caller=http.go:194 level=debug traceID=66c03cc208ce5dea orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.651265ms" +ts=2024-05-02T12:17:22.488094528Z caller=http.go:194 level=debug traceID=1fd004b44f49ba96 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.727691ms" +ts=2024-05-02T12:17:22.487275213Z caller=http.go:194 level=debug traceID=1d0e61840f30e804 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.269413ms" +ts=2024-05-02T12:17:22.486666444Z caller=http.go:194 level=debug traceID=78e2f11326c669df orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.302556ms" +ts=2024-05-02T12:17:22.485210793Z caller=http.go:194 level=debug traceID=499496c598b2a61a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.476502ms" +ts=2024-05-02T12:17:22.484253624Z caller=http.go:194 level=debug 
traceID=6cf5f86f62507af5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 337.95µs" +ts=2024-05-02T12:17:22.483935293Z caller=http.go:194 level=debug traceID=22846b9f8cf7f7e0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.682443ms" +ts=2024-05-02T12:17:22.483880771Z caller=http.go:194 level=debug traceID=26ca6ea3b29d98b2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.902592ms" +ts=2024-05-02T12:17:22.483627246Z caller=http.go:194 level=debug traceID=29e130ebb7b86104 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.574544ms" +ts=2024-05-02T12:17:22.483134276Z caller=http.go:194 level=debug traceID=296d8eb183ca8668 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.79981ms" +ts=2024-05-02T12:17:22.482738161Z caller=http.go:194 level=debug traceID=14a0a0e1c71a565b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.904058ms" +ts=2024-05-02T12:17:22.482115441Z caller=http.go:194 level=debug traceID=2c491d4f1453e35a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.075176ms" +ts=2024-05-02T12:17:22.482254773Z caller=http.go:194 level=debug traceID=3e8954ac8d960580 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.81048ms" +ts=2024-05-02T12:17:22.481910933Z caller=http.go:194 level=debug traceID=2abdff076163a032 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.552928ms" +ts=2024-05-02T12:17:22.480390711Z caller=http.go:194 level=debug traceID=220454ae177ce84b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.763102ms" +ts=2024-05-02T12:17:22.479703089Z caller=http.go:194 level=debug traceID=4be7212b23b90022 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.605346ms" +ts=2024-05-02T12:17:22.479051135Z caller=http.go:194 level=debug traceID=3ed79d752f3b8500 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.713369ms" +ts=2024-05-02T12:17:22.478701672Z caller=http.go:194 level=debug traceID=7815296b8dd8c940 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.246757ms" +ts=2024-05-02T12:17:22.478431079Z caller=http.go:194 level=debug traceID=66c03cc208ce5dea orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.487381ms" +ts=2024-05-02T12:17:22.47622057Z caller=http.go:194 level=debug traceID=66791908f0ee390d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.19499ms" +ts=2024-05-02T12:17:22.475458512Z caller=http.go:194 level=debug traceID=1896f10a98340011 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.058004ms" +ts=2024-05-02T12:17:22.474503708Z caller=http.go:194 level=debug traceID=5e40f09f5f09c3b0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.34384ms" +ts=2024-05-02T12:17:22.473396723Z caller=http.go:194 level=debug traceID=26ca6ea3b29d98b2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.556792ms" +ts=2024-05-02T12:17:22.473139073Z caller=http.go:194 level=debug traceID=79b16d5463a75575 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.934904ms" +ts=2024-05-02T12:17:22.472916082Z caller=http.go:194 level=debug traceID=7815296b8dd8c940 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.41504ms" +ts=2024-05-02T12:17:22.472410564Z caller=http.go:194 level=debug traceID=29e130ebb7b86104 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.641342ms" +ts=2024-05-02T12:17:22.472468228Z caller=http.go:194 level=debug traceID=4759eaa30a4da7fc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.450225ms" +ts=2024-05-02T12:17:22.471962962Z caller=http.go:194 level=debug traceID=613235846c6e5954 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
1.616595ms" +ts=2024-05-02T12:17:22.471983663Z caller=http.go:194 level=debug traceID=304fe39a19658e28 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.035463ms" +ts=2024-05-02T12:17:22.471658123Z caller=http.go:194 level=debug traceID=48758c0747fcc2da orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.402206ms" +ts=2024-05-02T12:17:22.471503694Z caller=http.go:194 level=debug traceID=3e8954ac8d960580 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.427383ms" +ts=2024-05-02T12:17:22.470610603Z caller=http.go:194 level=debug traceID=14a0a0e1c71a565b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.608839ms" +ts=2024-05-02T12:17:22.470514326Z caller=http.go:194 level=debug traceID=6468b23002a8dc3b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.861772ms" +ts=2024-05-02T12:17:22.470077556Z caller=http.go:194 level=debug traceID=220454ae177ce84b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.566333ms" +ts=2024-05-02T12:17:22.469321304Z caller=http.go:194 level=debug traceID=22846b9f8cf7f7e0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.007751ms" +ts=2024-05-02T12:17:22.469065316Z caller=http.go:194 level=debug traceID=1453549f5b71ae10 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.373643ms" +ts=2024-05-02T12:17:22.467971851Z caller=http.go:194 level=debug traceID=5b21ce99f8e369d9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.300212ms" +ts=2024-05-02T12:17:22.466687078Z caller=http.go:194 level=debug traceID=66791908f0ee390d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.58887ms" +ts=2024-05-02T12:17:22.464757984Z caller=http.go:194 level=debug traceID=3ca2072baf4695e8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.39459ms" +ts=2024-05-02T12:17:22.464706159Z caller=http.go:194 level=debug traceID=1896f10a98340011 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.585915ms" +ts=2024-05-02T12:17:22.464393174Z caller=http.go:194 level=debug traceID=77b8603fcaf97db5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.830868ms" +ts=2024-05-02T12:17:22.464122724Z caller=http.go:194 level=debug traceID=5e40f09f5f09c3b0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.962686ms" +ts=2024-05-02T12:17:22.463925092Z caller=http.go:194 level=debug traceID=62fa2fd2f3554541 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.758192ms" +ts=2024-05-02T12:17:22.463770679Z caller=http.go:194 level=debug traceID=2fb61905ad8432a5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.147519ms" +ts=2024-05-02T12:17:22.463115366Z caller=http.go:194 level=debug traceID=7f74c11ef73ddb64 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.756335ms" +ts=2024-05-02T12:17:22.462641528Z caller=http.go:194 level=debug traceID=4759eaa30a4da7fc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.942408ms" +ts=2024-05-02T12:17:22.462507013Z caller=http.go:194 level=debug traceID=249f820f299010a9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 28.205452ms" +ts=2024-05-02T12:17:22.462208406Z caller=http.go:194 level=debug traceID=79b16d5463a75575 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.610324ms" +ts=2024-05-02T12:17:22.461998282Z caller=http.go:194 level=debug traceID=6468b23002a8dc3b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.291223ms" +ts=2024-05-02T12:17:22.461630811Z caller=http.go:194 level=debug traceID=5f7b301305d8f22f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 59.497337ms" +ts=2024-05-02T12:17:22.461105964Z caller=http.go:194 level=debug 
traceID=613235846c6e5954 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.915661ms" +ts=2024-05-02T12:17:22.461012862Z caller=http.go:194 level=debug traceID=3b6bc952ea5b3d1a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.249188ms" +ts=2024-05-02T12:17:22.460412572Z caller=http.go:194 level=debug traceID=3af697850c570130 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 39.685492ms" +ts=2024-05-02T12:17:22.459135696Z caller=http.go:194 level=debug traceID=304fe39a19658e28 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.046053ms" +ts=2024-05-02T12:17:22.459029462Z caller=http.go:194 level=debug traceID=419be6ad649b9091 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 271.857µs" +ts=2024-05-02T12:17:22.457830356Z caller=http.go:194 level=debug traceID=1453549f5b71ae10 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.280312ms" +ts=2024-05-02T12:17:22.457479868Z caller=http.go:194 level=debug traceID=055c854c2252eeea orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.70604ms" +ts=2024-05-02T12:17:22.457464696Z caller=http.go:194 level=debug traceID=48758c0747fcc2da orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.582291ms" +ts=2024-05-02T12:17:22.456616354Z caller=http.go:194 level=debug traceID=5b21ce99f8e369d9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.88599ms" +ts=2024-05-02T12:17:22.455311982Z caller=http.go:194 level=debug traceID=0785c98b01cc14d7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.596453ms" +ts=2024-05-02T12:17:22.454816526Z caller=http.go:194 level=debug traceID=62fa2fd2f3554541 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.627856ms" +ts=2024-05-02T12:17:22.453967864Z caller=http.go:194 level=debug traceID=406974119f1ad9c9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.280394ms" +ts=2024-05-02T12:17:22.453547951Z caller=http.go:194 level=debug traceID=41993740398aad3d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.125235ms" +ts=2024-05-02T12:17:22.45357768Z caller=http.go:194 level=debug traceID=2fb61905ad8432a5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.140522ms" +ts=2024-05-02T12:17:22.452664479Z caller=http.go:194 level=debug traceID=3ca2072baf4695e8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.615562ms" +ts=2024-05-02T12:17:22.452354784Z caller=http.go:194 level=debug traceID=7f74c11ef73ddb64 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.244504ms" +ts=2024-05-02T12:17:22.451746864Z caller=http.go:194 level=debug traceID=321bf20b50821ea4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.725012ms" +ts=2024-05-02T12:17:22.451358733Z caller=http.go:194 level=debug traceID=77b8603fcaf97db5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.424674ms" +ts=2024-05-02T12:17:22.449482599Z caller=http.go:194 level=debug traceID=4aae52bceccd8de8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.625092ms" +ts=2024-05-02T12:17:22.449415858Z caller=http.go:194 level=debug traceID=2a8f6dd07933537f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.924574ms" +ts=2024-05-02T12:17:22.448951763Z caller=http.go:194 level=debug traceID=3b6bc952ea5b3d1a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.790975ms" +ts=2024-05-02T12:17:22.448582319Z caller=http.go:194 level=debug traceID=0669564128fe5d38 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.468042ms" +ts=2024-05-02T12:17:22.448246228Z caller=http.go:194 level=debug traceID=5cd7372a0ddc0a12 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
4.314346ms" +ts=2024-05-02T12:17:22.448097849Z caller=http.go:194 level=debug traceID=272878d259efdbbf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.588529ms" +ts=2024-05-02T12:17:22.447544021Z caller=http.go:194 level=debug traceID=5304cf2a829fd57d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.524157ms" +ts=2024-05-02T12:17:22.447582857Z caller=http.go:194 level=debug traceID=419be6ad649b9091 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 379.732µs" +ts=2024-05-02T12:17:22.447326741Z caller=http.go:194 level=debug traceID=249f820f299010a9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.48752ms" +ts=2024-05-02T12:17:22.446974196Z caller=http.go:194 level=debug traceID=3ddda983060151f4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.462941ms" +ts=2024-05-02T12:17:22.446478393Z caller=http.go:194 level=debug traceID=055c854c2252eeea orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.061579ms" +ts=2024-05-02T12:17:22.446230846Z caller=http.go:194 level=debug traceID=385fc9d11197e3ff orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.303649ms" +ts=2024-05-02T12:17:22.445573007Z caller=http.go:194 level=debug traceID=1d8cb4b55a24aed1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 320.117µs" +ts=2024-05-02T12:17:22.444797073Z caller=http.go:194 level=debug traceID=5fbca5f8e6ef6280 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.898319ms" +ts=2024-05-02T12:17:22.444515655Z caller=http.go:194 level=debug traceID=406974119f1ad9c9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.668099ms" +ts=2024-05-02T12:17:22.444348574Z caller=http.go:194 level=debug traceID=0785c98b01cc14d7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.911767ms" +ts=2024-05-02T12:17:22.443863741Z caller=http.go:194 level=debug traceID=19303cc88b85e167 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.645483ms" +ts=2024-05-02T12:17:22.443521884Z caller=http.go:194 level=debug traceID=6cb4c6e24fbf60d5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.192875ms" +ts=2024-05-02T12:17:22.443018674Z caller=http.go:194 level=debug traceID=735e50a45bc1a648 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.879786ms" +ts=2024-05-02T12:17:22.442627193Z caller=http.go:194 level=debug traceID=1b91ad470aaced3c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.021022ms" +ts=2024-05-02T12:17:22.441624478Z caller=http.go:194 level=debug traceID=41993740398aad3d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.821602ms" +ts=2024-05-02T12:17:22.441422144Z caller=http.go:194 level=debug traceID=321bf20b50821ea4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.926089ms" +ts=2024-05-02T12:17:22.440499432Z caller=http.go:194 level=debug traceID=634812dde02f43c9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.160588ms" +ts=2024-05-02T12:17:22.44046662Z caller=http.go:194 level=debug traceID=413c7c01b2963fae orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.985688ms" +ts=2024-05-02T12:17:22.439655659Z caller=http.go:194 level=debug traceID=13a13853e6c2e0c5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.412595ms" +ts=2024-05-02T12:17:22.438643473Z caller=http.go:194 level=debug traceID=4aae52bceccd8de8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.967658ms" +ts=2024-05-02T12:17:22.438335536Z caller=http.go:194 level=debug traceID=6b75494701eae383 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.444191ms" +ts=2024-05-02T12:17:22.43833398Z caller=http.go:194 level=debug 
traceID=22a7e9778d8148c7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.90846ms" +ts=2024-05-02T12:17:22.437998565Z caller=http.go:194 level=debug traceID=2a8f6dd07933537f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.710532ms" +ts=2024-05-02T12:17:22.437460041Z caller=http.go:194 level=debug traceID=0669564128fe5d38 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.447539ms" +ts=2024-05-02T12:17:22.437305069Z caller=http.go:194 level=debug traceID=5cd7372a0ddc0a12 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.672701ms" +ts=2024-05-02T12:17:22.437162433Z caller=http.go:194 level=debug traceID=272878d259efdbbf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.712027ms" +ts=2024-05-02T12:17:22.4365652Z caller=http.go:194 level=debug traceID=4448dfd0686f8435 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 16.074769ms" +ts=2024-05-02T12:17:22.435952964Z caller=http.go:194 level=debug traceID=14cd1e7bb6d20eb1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.713665ms" +ts=2024-05-02T12:17:22.435649307Z caller=http.go:194 level=debug traceID=3ddda983060151f4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.686496ms" +ts=2024-05-02T12:17:22.435596636Z caller=http.go:194 level=debug traceID=02ad04f0bb2cc8a8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.842965ms" +ts=2024-05-02T12:17:22.435441175Z caller=http.go:194 level=debug traceID=2354df72f44b58f1 orgID=3648 msg="POST /push.v1.PusherService/Push (400) 148.631µs" +ts=2024-05-02T12:17:22.435130198Z caller=http.go:194 level=debug traceID=1d8cb4b55a24aed1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 610.393µs" +ts=2024-05-02T12:17:22.435123364Z caller=http.go:194 level=debug traceID=74e0bef11fc840e3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.327047ms" +ts=2024-05-02T12:17:22.434264561Z caller=http.go:194 level=debug traceID=6cb4c6e24fbf60d5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.986316ms" +ts=2024-05-02T12:17:22.434195676Z caller=http.go:194 level=debug traceID=5304cf2a829fd57d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.053236ms" +ts=2024-05-02T12:17:22.433856447Z caller=http.go:194 level=debug traceID=385fc9d11197e3ff orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.539987ms" +ts=2024-05-02T12:17:22.433711604Z caller=http.go:194 level=debug traceID=19303cc88b85e167 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.798477ms" +ts=2024-05-02T12:17:22.433552489Z caller=http.go:194 level=debug traceID=2360604480fca5f1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.975885ms" +ts=2024-05-02T12:17:22.431720567Z caller=http.go:194 level=debug traceID=76ab8bece46636da orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.628854ms" +ts=2024-05-02T12:17:22.431003684Z caller=http.go:194 level=debug traceID=5fbca5f8e6ef6280 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.272577ms" +ts=2024-05-02T12:17:22.430716717Z caller=http.go:194 level=debug traceID=1b91ad470aaced3c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.623303ms" +ts=2024-05-02T12:17:22.430164865Z caller=http.go:194 level=debug traceID=634812dde02f43c9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.328921ms" +ts=2024-05-02T12:17:22.42987396Z caller=http.go:194 level=debug traceID=4010c7652aff54d2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.513178ms" +ts=2024-05-02T12:17:22.429871933Z caller=http.go:194 level=debug traceID=735e50a45bc1a648 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
3.741982ms" +ts=2024-05-02T12:17:22.429785969Z caller=http.go:194 level=debug traceID=13a13853e6c2e0c5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.557404ms" +ts=2024-05-02T12:17:22.429539234Z caller=http.go:194 level=debug traceID=413c7c01b2963fae orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.165074ms" +ts=2024-05-02T12:17:22.429105394Z caller=http.go:194 level=debug traceID=0cbaf1d29fc148c6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.089245ms" +ts=2024-05-02T12:17:22.422750346Z caller=http.go:194 level=debug traceID=720a895a663691a0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 16.258458ms" +ts=2024-05-02T12:17:22.428717685Z caller=http.go:194 level=debug traceID=77f405659b6141f5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.18746ms" +ts=2024-05-02T12:17:22.426316591Z caller=http.go:194 level=debug traceID=4448dfd0686f8435 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.62554ms" +ts=2024-05-02T12:17:22.425976255Z caller=http.go:194 level=debug traceID=02ad04f0bb2cc8a8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.029101ms" +ts=2024-05-02T12:17:22.425980631Z caller=http.go:194 level=debug traceID=6b75494701eae383 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.620368ms" +ts=2024-05-02T12:17:22.425213077Z caller=http.go:194 level=debug traceID=14cd1e7bb6d20eb1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.055945ms" +ts=2024-05-02T12:17:22.424763736Z caller=http.go:194 level=debug traceID=74e0bef11fc840e3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.532382ms" +ts=2024-05-02T12:17:22.424528879Z caller=http.go:194 level=debug traceID=2354df72f44b58f1 orgID=1218 msg="POST /push.v1.PusherService/Push (400) 240.791µs" +ts=2024-05-02T12:17:22.424447742Z caller=http.go:194 level=debug traceID=3af697850c570130 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.115333ms" +ts=2024-05-02T12:17:22.424482569Z caller=http.go:194 level=debug traceID=089e039da94281d3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.107763ms" +ts=2024-05-02T12:17:22.424412447Z caller=http.go:194 level=debug traceID=558b4cdf830f8935 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.960604ms" +ts=2024-05-02T12:17:22.423766303Z caller=http.go:194 level=debug traceID=22a7e9778d8148c7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.361052ms" +ts=2024-05-02T12:17:22.423176974Z caller=http.go:194 level=debug traceID=2360604480fca5f1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.66655ms" +ts=2024-05-02T12:17:22.423007012Z caller=http.go:194 level=debug traceID=092c55c08ac28c13 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 287.498µs" +ts=2024-05-02T12:17:22.421074064Z caller=http.go:194 level=debug traceID=76ab8bece46636da orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.430374ms" +ts=2024-05-02T12:17:22.419458175Z caller=http.go:194 level=debug traceID=5933138a0d4159b1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.172629ms" +ts=2024-05-02T12:17:22.418659479Z caller=http.go:194 level=debug traceID=720a895a663691a0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.001371ms" +ts=2024-05-02T12:17:22.418542639Z caller=http.go:194 level=debug traceID=0cbaf1d29fc148c6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.63859ms" +ts=2024-05-02T12:17:22.41724059Z caller=http.go:194 level=debug traceID=0d09b7b48ab3d77f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.803325ms" +ts=2024-05-02T12:17:22.417034985Z caller=http.go:194 level=debug 
traceID=45a1c0d5b976fa75 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.275151ms" +ts=2024-05-02T12:17:22.41667192Z caller=http.go:194 level=debug traceID=77f405659b6141f5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.159783ms" +ts=2024-05-02T12:17:22.4165637Z caller=http.go:194 level=debug traceID=78d2f7bdc34bbc6a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.105466ms" +ts=2024-05-02T12:17:22.416501344Z caller=http.go:194 level=debug traceID=4010c7652aff54d2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.553342ms" +ts=2024-05-02T12:17:22.415989042Z caller=http.go:194 level=debug traceID=1a0ab8868e276b9f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.733012ms" +ts=2024-05-02T12:17:22.41471032Z caller=http.go:194 level=debug traceID=2a8da47dbfc6db5d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.351937ms" +ts=2024-05-02T12:17:22.414602943Z caller=http.go:194 level=debug traceID=5923bf5cdb1e70ac orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.381813ms" +ts=2024-05-02T12:17:22.414098965Z caller=http.go:194 level=debug traceID=19dbc3fb7323bf29 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.537068ms" +ts=2024-05-02T12:17:22.413457371Z caller=http.go:194 level=debug traceID=558b4cdf830f8935 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.96137ms" +ts=2024-05-02T12:17:22.41309743Z caller=http.go:194 level=debug traceID=089e039da94281d3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.970779ms" +ts=2024-05-02T12:17:22.411948002Z caller=http.go:194 level=debug traceID=08aa17e3ac2072de orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.076945ms" +ts=2024-05-02T12:17:22.412012676Z caller=http.go:194 level=debug traceID=7afa09dbda2e2271 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.846931ms" +ts=2024-05-02T12:17:22.411335863Z caller=http.go:194 level=debug traceID=092c55c08ac28c13 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 324.61µs" +ts=2024-05-02T12:17:22.410649515Z caller=http.go:194 level=debug traceID=6bc8a1fbf9b86818 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.109777ms" +ts=2024-05-02T12:17:22.410152909Z caller=http.go:194 level=debug traceID=4ab9753508294ee5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 255.933µs" +ts=2024-05-02T12:17:22.40999961Z caller=http.go:194 level=debug traceID=240462f553f01b3e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.997493ms" +ts=2024-05-02T12:17:22.409818001Z caller=http.go:194 level=debug traceID=62507f42f97747fd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 23.476561ms" +ts=2024-05-02T12:17:22.409381017Z caller=http.go:194 level=debug traceID=6270d495b850b11e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 344.532µs" +ts=2024-05-02T12:17:22.408626514Z caller=http.go:194 level=debug traceID=5933138a0d4159b1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.314569ms" +ts=2024-05-02T12:17:22.407520113Z caller=http.go:194 level=debug traceID=0d09b7b48ab3d77f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.84516ms" +ts=2024-05-02T12:17:22.405931272Z caller=http.go:194 level=debug traceID=35fa20a92ec1b43a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.528301ms" +ts=2024-05-02T12:17:22.405742142Z caller=http.go:194 level=debug traceID=3903d9cddcab3a9d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.972073ms" +ts=2024-05-02T12:17:22.405079085Z caller=http.go:194 level=debug traceID=19dbc3fb7323bf29 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
6.438825ms" +ts=2024-05-02T12:17:22.404630973Z caller=http.go:194 level=debug traceID=45a1c0d5b976fa75 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.706964ms" +ts=2024-05-02T12:17:22.404516733Z caller=http.go:194 level=debug traceID=5923bf5cdb1e70ac orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.619058ms" +ts=2024-05-02T12:17:22.4045148Z caller=http.go:194 level=debug traceID=2a8da47dbfc6db5d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.228615ms" +ts=2024-05-02T12:17:22.404380889Z caller=http.go:194 level=debug traceID=1a0ab8868e276b9f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.265023ms" +ts=2024-05-02T12:17:22.404137783Z caller=http.go:194 level=debug traceID=7afa09dbda2e2271 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.078114ms" +ts=2024-05-02T12:17:22.402843074Z caller=http.go:194 level=debug traceID=78d2f7bdc34bbc6a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.137659ms" +ts=2024-05-02T12:17:22.402416635Z caller=http.go:194 level=debug traceID=52d03ac22bb212ff orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.711482ms" +ts=2024-05-02T12:17:22.401792536Z caller=http.go:194 level=debug traceID=49e63da619fb8191 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.593ms" +ts=2024-05-02T12:17:22.401538034Z caller=http.go:194 level=debug traceID=31f15fd2b1f1b0c0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.904292ms" +ts=2024-05-02T12:17:22.400348971Z caller=http.go:194 level=debug traceID=4817476e8972db2f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.887837ms" +ts=2024-05-02T12:17:22.400265231Z caller=http.go:194 level=debug traceID=08aa17e3ac2072de orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.700439ms" +ts=2024-05-02T12:17:22.399365856Z caller=http.go:194 level=debug traceID=6a15f1f8666f74cf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.753003ms" +ts=2024-05-02T12:17:22.399322486Z caller=http.go:194 level=debug traceID=6270d495b850b11e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 366.658µs" +ts=2024-05-02T12:17:22.399215632Z caller=http.go:194 level=debug traceID=240462f553f01b3e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.382467ms" +ts=2024-05-02T12:17:22.398890289Z caller=http.go:194 level=debug traceID=4ab9753508294ee5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 339.853µs" +ts=2024-05-02T12:17:22.397656712Z caller=http.go:194 level=debug traceID=6bc8a1fbf9b86818 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.899636ms" +ts=2024-05-02T12:17:22.397402719Z caller=http.go:194 level=debug traceID=35fa20a92ec1b43a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.209034ms" +ts=2024-05-02T12:17:22.397190944Z caller=http.go:194 level=debug traceID=2ab7a204a63c11d5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.34366ms" +ts=2024-05-02T12:17:22.39635465Z caller=http.go:194 level=debug traceID=3ffffbe04f446690 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.195767ms" +ts=2024-05-02T12:17:22.396355755Z caller=http.go:194 level=debug traceID=69bceb72d64ee887 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.836043ms" +ts=2024-05-02T12:17:22.395315426Z caller=http.go:194 level=debug traceID=3aa3da78f25ffee2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.587485ms" +ts=2024-05-02T12:17:22.392425186Z caller=http.go:194 level=debug traceID=52d03ac22bb212ff orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.783582ms" +ts=2024-05-02T12:17:22.391711619Z caller=http.go:194 level=debug 
traceID=31f15fd2b1f1b0c0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.241644ms" +ts=2024-05-02T12:17:22.391706671Z caller=http.go:194 level=debug traceID=6f4ef952d717ab99 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.763258ms" +ts=2024-05-02T12:17:22.39160207Z caller=http.go:194 level=debug traceID=49e63da619fb8191 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.734074ms" +ts=2024-05-02T12:17:22.391657803Z caller=http.go:194 level=debug traceID=3903d9cddcab3a9d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.949951ms" +ts=2024-05-02T12:17:22.390273902Z caller=http.go:194 level=debug traceID=0695fa9d52d1f8b4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.570461ms" +ts=2024-05-02T12:17:22.389703914Z caller=http.go:194 level=debug traceID=64faddd4548db4f1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.611079ms" +ts=2024-05-02T12:17:22.38939204Z caller=http.go:194 level=debug traceID=3f041f3326481bc9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 380.289µs" +ts=2024-05-02T12:17:22.388330605Z caller=http.go:194 level=debug traceID=0b48211f2661aac4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.843806ms" +ts=2024-05-02T12:17:22.388186644Z caller=http.go:194 level=debug traceID=4817476e8972db2f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.732559ms" +ts=2024-05-02T12:17:22.388182556Z caller=http.go:194 level=debug traceID=6a15f1f8666f74cf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.60786ms" +ts=2024-05-02T12:17:22.387710558Z caller=http.go:194 level=debug traceID=2b571f9795231586 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.450182ms" +ts=2024-05-02T12:17:22.387428925Z caller=http.go:194 level=debug traceID=4175bec154142343 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.389814ms" +ts=2024-05-02T12:17:22.387023688Z caller=http.go:194 level=debug traceID=2ab7a204a63c11d5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.737666ms" +ts=2024-05-02T12:17:22.385565971Z caller=http.go:194 level=debug traceID=18d236c2e2795158 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.369342ms" +ts=2024-05-02T12:17:22.384742997Z caller=http.go:194 level=debug traceID=3aa3da78f25ffee2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.333813ms" +ts=2024-05-02T12:17:22.384632027Z caller=http.go:194 level=debug traceID=69bceb72d64ee887 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.769139ms" +ts=2024-05-02T12:17:22.384360908Z caller=http.go:194 level=debug traceID=62507f42f97747fd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.964423ms" +ts=2024-05-02T12:17:22.384218441Z caller=http.go:194 level=debug traceID=3ffffbe04f446690 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.538506ms" +ts=2024-05-02T12:17:22.383627323Z caller=http.go:194 level=debug traceID=1afe91c8cc16a5ca orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.644776ms" +ts=2024-05-02T12:17:22.382881334Z caller=http.go:194 level=debug traceID=6ba6ae0dc5c9f3e0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.687832ms" +ts=2024-05-02T12:17:22.382094565Z caller=http.go:194 level=debug traceID=50467dabf1d9da6b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.96854ms" +ts=2024-05-02T12:17:22.381288669Z caller=http.go:194 level=debug traceID=521c2a3760ede13b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.430083ms" +ts=2024-05-02T12:17:22.380549529Z caller=http.go:194 level=debug traceID=6f4ef952d717ab99 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
1.68352ms" +ts=2024-05-02T12:17:22.378886412Z caller=http.go:194 level=debug traceID=0d1e44d18747ac38 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.836336ms" +ts=2024-05-02T12:17:22.378528721Z caller=http.go:194 level=debug traceID=0695fa9d52d1f8b4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.96756ms" +ts=2024-05-02T12:17:22.378320461Z caller=http.go:194 level=debug traceID=64faddd4548db4f1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.594275ms" +ts=2024-05-02T12:17:22.37825952Z caller=http.go:194 level=debug traceID=63cba6292e728ae8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.379683ms" +ts=2024-05-02T12:17:22.378107503Z caller=http.go:194 level=debug traceID=3f041f3326481bc9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 476.9µs" +ts=2024-05-02T12:17:22.377962058Z caller=http.go:194 level=debug traceID=3be0464b2c33d367 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.987253ms" +ts=2024-05-02T12:17:22.377955065Z caller=http.go:194 level=debug traceID=0b48211f2661aac4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.381386ms" +ts=2024-05-02T12:17:22.377426626Z caller=http.go:194 level=debug traceID=2b571f9795231586 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.192546ms" +ts=2024-05-02T12:17:22.377232491Z caller=http.go:194 level=debug traceID=74360be01605baa7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.521058ms" +ts=2024-05-02T12:17:22.377021925Z caller=http.go:194 level=debug traceID=4175bec154142343 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.30579ms" +ts=2024-05-02T12:17:22.37656515Z caller=http.go:194 level=debug traceID=18d236c2e2795158 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.193865ms" +ts=2024-05-02T12:17:22.376229059Z caller=http.go:194 level=debug traceID=226f392dcb980d99 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.894097ms" +ts=2024-05-02T12:17:22.375510485Z caller=http.go:194 level=debug traceID=42a209a9c714a0f4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.943391ms" +ts=2024-05-02T12:17:22.375388572Z caller=http.go:194 level=debug traceID=1afe91c8cc16a5ca orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.132516ms" +ts=2024-05-02T12:17:22.375221153Z caller=http.go:194 level=debug traceID=45a9a41dbc057631 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.218301ms" +ts=2024-05-02T12:17:22.374173526Z caller=http.go:194 level=debug traceID=3c160f450766d000 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.790693ms" +ts=2024-05-02T12:17:22.37309005Z caller=http.go:194 level=debug traceID=6ba6ae0dc5c9f3e0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.970656ms" +ts=2024-05-02T12:17:22.373106378Z caller=http.go:194 level=debug traceID=0986adf4b3b99b40 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.263381ms" +ts=2024-05-02T12:17:22.372512069Z caller=http.go:194 level=debug traceID=06ea03f6c9f204ed orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.673085ms" +ts=2024-05-02T12:17:22.372043039Z caller=http.go:194 level=debug traceID=6ed0222d1f224591 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.95151ms" +ts=2024-05-02T12:17:22.371409192Z caller=http.go:194 level=debug traceID=5ffd0c05f7efaa18 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.350116ms" +ts=2024-05-02T12:17:22.370833941Z caller=http.go:194 level=debug traceID=4ed9ad8ce302f55f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.542732ms" +ts=2024-05-02T12:17:22.370755744Z caller=http.go:194 level=debug 
traceID=55bbe53adf1241a0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 328.368µs" +ts=2024-05-02T12:17:22.370778046Z caller=http.go:194 level=debug traceID=521c2a3760ede13b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.86621ms" +ts=2024-05-02T12:17:22.370246606Z caller=http.go:194 level=debug traceID=6a235ca8b94f62de orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.519754ms" +ts=2024-05-02T12:17:22.369694989Z caller=http.go:194 level=debug traceID=50467dabf1d9da6b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.606544ms" +ts=2024-05-02T12:17:22.368771451Z caller=http.go:194 level=debug traceID=63cba6292e728ae8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.936473ms" +ts=2024-05-02T12:17:22.368415133Z caller=http.go:194 level=debug traceID=0d1e44d18747ac38 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.435274ms" +ts=2024-05-02T12:17:22.367704429Z caller=http.go:194 level=debug traceID=74360be01605baa7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.766052ms" +ts=2024-05-02T12:17:22.367669576Z caller=http.go:194 level=debug traceID=3be0464b2c33d367 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.977353ms" +ts=2024-05-02T12:17:22.36511816Z caller=http.go:194 level=debug traceID=4ed9ad8ce302f55f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.894552ms" +ts=2024-05-02T12:17:22.366644251Z caller=http.go:194 level=debug traceID=226f392dcb980d99 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.263156ms" +ts=2024-05-02T12:17:22.365795378Z caller=http.go:194 level=debug traceID=45a9a41dbc057631 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.673779ms" +ts=2024-05-02T12:17:22.364451274Z caller=http.go:194 level=debug traceID=3c160f450766d000 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.547734ms" +ts=2024-05-02T12:17:22.36417316Z caller=http.go:194 level=debug traceID=73fea8713ce19548 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.02369ms" +ts=2024-05-02T12:17:22.364038028Z caller=http.go:194 level=debug traceID=42a209a9c714a0f4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.055742ms" +ts=2024-05-02T12:17:22.363379171Z caller=http.go:194 level=debug traceID=0986adf4b3b99b40 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.683284ms" +ts=2024-05-02T12:17:22.363240149Z caller=http.go:194 level=debug traceID=1fcc7c453eb51eb0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.644326ms" +ts=2024-05-02T12:17:22.363167667Z caller=http.go:194 level=debug traceID=737e7052bdd22e57 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.38829ms" +ts=2024-05-02T12:17:22.363068724Z caller=http.go:194 level=debug traceID=74395099ddb0fd56 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.166222ms" +ts=2024-05-02T12:17:22.362286753Z caller=http.go:194 level=debug traceID=1fe9caebc36b03c0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.898872ms" +ts=2024-05-02T12:17:22.362125185Z caller=http.go:194 level=debug traceID=06ea03f6c9f204ed orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.911432ms" +ts=2024-05-02T12:17:22.361286885Z caller=http.go:194 level=debug traceID=4cbce1c04dfac09e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.938356ms" +ts=2024-05-02T12:17:22.361114664Z caller=http.go:194 level=debug traceID=3a8756bd12303e62 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.279975ms" +ts=2024-05-02T12:17:22.36084384Z caller=http.go:194 level=debug traceID=047bde3e9f1a5767 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
3.573426ms" +ts=2024-05-02T12:17:22.360293242Z caller=http.go:194 level=debug traceID=6a235ca8b94f62de orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.749934ms" +ts=2024-05-02T12:17:22.359730555Z caller=http.go:194 level=debug traceID=61e65207ceb5ee7a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 326.732µs" +ts=2024-05-02T12:17:22.359469367Z caller=http.go:194 level=debug traceID=6ed0222d1f224591 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.430204ms" +ts=2024-05-02T12:17:22.359456303Z caller=http.go:194 level=debug traceID=55bbe53adf1241a0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 374.648µs" +ts=2024-05-02T12:17:22.359049925Z caller=http.go:194 level=debug traceID=02ae3f853d6b667e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.995209ms" +ts=2024-05-02T12:17:22.358663636Z caller=http.go:194 level=debug traceID=5ffd0c05f7efaa18 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.8354ms" +ts=2024-05-02T12:17:22.355987214Z caller=http.go:194 level=debug traceID=73fea8713ce19548 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.475803ms" +ts=2024-05-02T12:17:22.35582787Z caller=http.go:194 level=debug traceID=570e940c253c5b34 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.184045ms" +ts=2024-05-02T12:17:22.353660985Z caller=http.go:194 level=debug traceID=047bde3e9f1a5767 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.376639ms" +ts=2024-05-02T12:17:22.354131908Z caller=http.go:194 level=debug traceID=1555fb2fb5af1241 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.006624ms" +ts=2024-05-02T12:17:22.354034709Z caller=http.go:194 level=debug traceID=43aa9f46dae01e5f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.633786ms" +ts=2024-05-02T12:17:22.353195219Z caller=http.go:194 level=debug traceID=12a172e21e4820b8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.338159ms" +ts=2024-05-02T12:17:22.353018723Z caller=http.go:194 level=debug traceID=23657a4a9620f9ca orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.338ms" +ts=2024-05-02T12:17:22.352642073Z caller=http.go:194 level=debug traceID=74395099ddb0fd56 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.778156ms" +ts=2024-05-02T12:17:22.352717222Z caller=http.go:194 level=debug traceID=1c6960b86b417f18 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.736788ms" +ts=2024-05-02T12:17:22.352636336Z caller=http.go:194 level=debug traceID=737e7052bdd22e57 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.689645ms" +ts=2024-05-02T12:17:22.352006041Z caller=http.go:194 level=debug traceID=059d6a8895da22c8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.901472ms" +ts=2024-05-02T12:17:22.351455799Z caller=http.go:194 level=debug traceID=1fcc7c453eb51eb0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.64613ms" +ts=2024-05-02T12:17:22.350928881Z caller=http.go:194 level=debug traceID=02ae3f853d6b667e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.007754ms" +ts=2024-05-02T12:17:22.350849939Z caller=http.go:194 level=debug traceID=1fe9caebc36b03c0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.69769ms" +ts=2024-05-02T12:17:22.350668731Z caller=http.go:194 level=debug traceID=4cbce1c04dfac09e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.131548ms" +ts=2024-05-02T12:17:22.350449887Z caller=http.go:194 level=debug traceID=3a8756bd12303e62 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.584321ms" +ts=2024-05-02T12:17:22.350035388Z caller=http.go:194 level=debug 
traceID=11278a80cc5a2f2e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.622657ms" +ts=2024-05-02T12:17:22.34983639Z caller=http.go:194 level=debug traceID=1e882f96f343870b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.403488ms" +ts=2024-05-02T12:17:22.349279926Z caller=http.go:194 level=debug traceID=61e65207ceb5ee7a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 364.793µs" +ts=2024-05-02T12:17:22.348279723Z caller=http.go:194 level=debug traceID=3716cf8dd701c519 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.871487ms" +ts=2024-05-02T12:17:22.346228137Z caller=http.go:194 level=debug traceID=570e940c253c5b34 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.695294ms" +ts=2024-05-02T12:17:22.345922952Z caller=http.go:194 level=debug traceID=2c789ff13b36aaa2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.077035ms" +ts=2024-05-02T12:17:22.345403979Z caller=http.go:194 level=debug traceID=22bb3140a3699bdc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.494477ms" +ts=2024-05-02T12:17:22.344391271Z caller=http.go:194 level=debug traceID=2570daf3cbd8524d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.145845ms" +ts=2024-05-02T12:17:22.343422551Z caller=http.go:194 level=debug traceID=1555fb2fb5af1241 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.608779ms" +ts=2024-05-02T12:17:22.34255693Z caller=http.go:194 level=debug traceID=43aa9f46dae01e5f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.153551ms" +ts=2024-05-02T12:17:22.342258108Z caller=http.go:194 level=debug traceID=12a172e21e4820b8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.602ms" +ts=2024-05-02T12:17:22.34215525Z caller=http.go:194 level=debug traceID=23657a4a9620f9ca orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.731505ms" +ts=2024-05-02T12:17:22.341556919Z caller=http.go:194 level=debug traceID=1c6960b86b417f18 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.320506ms" +ts=2024-05-02T12:17:22.341543224Z caller=http.go:194 level=debug traceID=648eb564f6ed101a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.335673ms" +ts=2024-05-02T12:17:22.341147351Z caller=http.go:194 level=debug traceID=2d490329e8c4df9d orgID=3648 msg="POST /push.v1.PusherService/Push (400) 144.232µs" +ts=2024-05-02T12:17:22.340784561Z caller=http.go:194 level=debug traceID=059d6a8895da22c8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.858019ms" +ts=2024-05-02T12:17:22.339454561Z caller=http.go:194 level=debug traceID=11278a80cc5a2f2e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.584516ms" +ts=2024-05-02T12:17:22.338902961Z caller=http.go:194 level=debug traceID=5217fd0c96893fd8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.095878ms" +ts=2024-05-02T12:17:22.338116032Z caller=http.go:194 level=debug traceID=1e882f96f343870b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.123038ms" +ts=2024-05-02T12:17:22.337962847Z caller=http.go:194 level=debug traceID=3716cf8dd701c519 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.725292ms" +ts=2024-05-02T12:17:22.337484888Z caller=http.go:194 level=debug traceID=40abe2feb9b9f5b2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.623624ms" +ts=2024-05-02T12:17:22.336485192Z caller=http.go:194 level=debug traceID=657a3ea08eb2929c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.200072ms" +ts=2024-05-02T12:17:22.336507955Z caller=http.go:194 level=debug traceID=13a2be01bc3b42dc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
1.970656ms" +ts=2024-05-02T12:17:22.335337109Z caller=http.go:194 level=debug traceID=1f720ef0cf75554e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.772851ms" +ts=2024-05-02T12:17:22.334935225Z caller=http.go:194 level=debug traceID=22bb3140a3699bdc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.03238ms" +ts=2024-05-02T12:17:22.334939818Z caller=http.go:194 level=debug traceID=2c789ff13b36aaa2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.106223ms" +ts=2024-05-02T12:17:22.334359122Z caller=http.go:194 level=debug traceID=3d4d5ee59696cf0a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.641834ms" +ts=2024-05-02T12:17:22.333186801Z caller=http.go:194 level=debug traceID=7d1d9fb63db43d37 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.423081ms" +ts=2024-05-02T12:17:22.333130596Z caller=http.go:194 level=debug traceID=2570daf3cbd8524d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.481287ms" +ts=2024-05-02T12:17:22.332413872Z caller=http.go:194 level=debug traceID=14a2acf71ae8a3dc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.511043ms" +ts=2024-05-02T12:17:22.331233551Z caller=http.go:194 level=debug traceID=648eb564f6ed101a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.031783ms" +ts=2024-05-02T12:17:22.330410494Z caller=http.go:194 level=debug traceID=40abe2feb9b9f5b2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.553475ms" +ts=2024-05-02T12:17:22.330570487Z caller=http.go:194 level=debug traceID=2215af0bd8b27393 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.455465ms" +ts=2024-05-02T12:17:22.329768904Z caller=http.go:194 level=debug traceID=2d490329e8c4df9d orgID=1218 msg="POST /push.v1.PusherService/Push (400) 135.13µs" +ts=2024-05-02T12:17:22.329359968Z caller=http.go:194 level=debug traceID=191c13dafb7ce325 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.594041ms" +ts=2024-05-02T12:17:22.328484398Z caller=http.go:194 level=debug traceID=377bfe479e55d0fb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.730301ms" +ts=2024-05-02T12:17:22.328364855Z caller=http.go:194 level=debug traceID=321f0c1efb26de64 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.820318ms" +ts=2024-05-02T12:17:22.328323748Z caller=http.go:194 level=debug traceID=154643a2a08e5049 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.810123ms" +ts=2024-05-02T12:17:22.327977203Z caller=http.go:194 level=debug traceID=711b716881ecdb87 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.695914ms" +ts=2024-05-02T12:17:22.327411611Z caller=http.go:194 level=debug traceID=5217fd0c96893fd8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.841517ms" +ts=2024-05-02T12:17:22.326961198Z caller=http.go:194 level=debug traceID=1d750e60e3fd8c40 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 16.308328ms" +ts=2024-05-02T12:17:22.326524798Z caller=http.go:194 level=debug traceID=55d22a09b09988ef orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.638219ms" +ts=2024-05-02T12:17:22.326361324Z caller=http.go:194 level=debug traceID=21e2b22a250c2dd1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.625237ms" +ts=2024-05-02T12:17:22.325797951Z caller=http.go:194 level=debug traceID=13a2be01bc3b42dc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.680966ms" +ts=2024-05-02T12:17:22.325649189Z caller=http.go:194 level=debug traceID=339a3956622d6ccf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.099682ms" +ts=2024-05-02T12:17:22.325519912Z caller=http.go:194 level=debug 
traceID=657a3ea08eb2929c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.29184ms" +ts=2024-05-02T12:17:22.324769324Z caller=http.go:194 level=debug traceID=1f720ef0cf75554e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.441704ms" +ts=2024-05-02T12:17:22.324136164Z caller=http.go:194 level=debug traceID=5ffd6a983a743f56 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.306201ms" +ts=2024-05-02T12:17:22.324098739Z caller=http.go:194 level=debug traceID=44f0487b56c54f99 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.009953ms" +ts=2024-05-02T12:17:22.322816859Z caller=http.go:194 level=debug traceID=313606f439884292 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.519884ms" +ts=2024-05-02T12:17:22.322558814Z caller=http.go:194 level=debug traceID=3d4d5ee59696cf0a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.569269ms" +ts=2024-05-02T12:17:22.322501342Z caller=http.go:194 level=debug traceID=14a2acf71ae8a3dc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.682565ms" +ts=2024-05-02T12:17:22.321218974Z caller=http.go:194 level=debug traceID=2796859ae4301233 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.715975ms" +ts=2024-05-02T12:17:22.320858449Z caller=http.go:194 level=debug traceID=62dd4dd7c9829bc7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.474875ms" +ts=2024-05-02T12:17:22.320603026Z caller=http.go:194 level=debug traceID=341ab094a020d7ce orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.994627ms" +ts=2024-05-02T12:17:22.32053114Z caller=http.go:194 level=debug traceID=7d1d9fb63db43d37 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.357502ms" +ts=2024-05-02T12:17:22.319427376Z caller=http.go:194 level=debug traceID=711b716881ecdb87 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.20044ms" +ts=2024-05-02T12:17:22.319097739Z caller=http.go:194 level=debug traceID=2215af0bd8b27393 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.185242ms" +ts=2024-05-02T12:17:22.318890471Z caller=http.go:194 level=debug traceID=321f0c1efb26de64 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.817314ms" +ts=2024-05-02T12:17:22.318184357Z caller=http.go:194 level=debug traceID=191c13dafb7ce325 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.782991ms" +ts=2024-05-02T12:17:22.317588214Z caller=http.go:194 level=debug traceID=112c6fc6c783b785 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.678709ms" +ts=2024-05-02T12:17:22.3174381Z caller=http.go:194 level=debug traceID=55d22a09b09988ef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.254584ms" +ts=2024-05-02T12:17:22.317416616Z caller=http.go:194 level=debug traceID=3218bc06e6bb0951 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.094703ms" +ts=2024-05-02T12:17:22.31679697Z caller=http.go:194 level=debug traceID=377bfe479e55d0fb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.597302ms" +ts=2024-05-02T12:17:22.316324057Z caller=http.go:194 level=debug traceID=1d750e60e3fd8c40 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.015773ms" +ts=2024-05-02T12:17:22.315907148Z caller=http.go:194 level=debug traceID=21e2b22a250c2dd1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.338129ms" +ts=2024-05-02T12:17:22.315812442Z caller=http.go:194 level=debug traceID=339a3956622d6ccf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.274532ms" +ts=2024-05-02T12:17:22.315359911Z caller=http.go:194 level=debug traceID=154643a2a08e5049 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
2.692424ms" +ts=2024-05-02T12:17:22.314323349Z caller=http.go:194 level=debug traceID=2c9337308be8afb7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.542992ms" +ts=2024-05-02T12:17:22.313686938Z caller=http.go:194 level=debug traceID=577d067f6c58e048 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.597618ms" +ts=2024-05-02T12:17:22.313438546Z caller=http.go:194 level=debug traceID=195b5ca6b5f8bb7f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.678831ms" +ts=2024-05-02T12:17:22.312346318Z caller=http.go:194 level=debug traceID=5ffd6a983a743f56 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.522623ms" +ts=2024-05-02T12:17:22.311110402Z caller=http.go:194 level=debug traceID=44f0487b56c54f99 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.480646ms" +ts=2024-05-02T12:17:22.311005125Z caller=http.go:194 level=debug traceID=62dd4dd7c9829bc7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.871035ms" +ts=2024-05-02T12:17:22.310241156Z caller=http.go:194 level=debug traceID=341ab094a020d7ce orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.690669ms" +ts=2024-05-02T12:17:22.309585401Z caller=http.go:194 level=debug traceID=50197d401fcf2cc6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.801319ms" +ts=2024-05-02T12:17:22.309378229Z caller=http.go:194 level=debug traceID=2796859ae4301233 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.19976ms" +ts=2024-05-02T12:17:22.309267685Z caller=http.go:194 level=debug traceID=0469ed7cf3b5ec11 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.337116ms" +ts=2024-05-02T12:17:22.309218578Z caller=http.go:194 level=debug traceID=52e689100fa7a595 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.063352ms" +ts=2024-05-02T12:17:22.308630341Z caller=http.go:194 level=debug traceID=313606f439884292 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.82171ms" +ts=2024-05-02T12:17:22.307426348Z caller=http.go:194 level=debug traceID=3218bc06e6bb0951 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.381116ms" +ts=2024-05-02T12:17:22.30725168Z caller=http.go:194 level=debug traceID=2fbf8162520d21b4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.686092ms" +ts=2024-05-02T12:17:22.306149447Z caller=http.go:194 level=debug traceID=1b9bb92dd7002195 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.471654ms" +ts=2024-05-02T12:17:22.305804227Z caller=http.go:194 level=debug traceID=112c6fc6c783b785 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.599835ms" +ts=2024-05-02T12:17:22.30557422Z caller=http.go:194 level=debug traceID=3aa5d9d146fe568d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.583403ms" +ts=2024-05-02T12:17:22.305509471Z caller=http.go:194 level=debug traceID=6bcd599518f976e2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.986841ms" +ts=2024-05-02T12:17:22.30444504Z caller=http.go:194 level=debug traceID=21ef0f6ce38b3f80 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.660354ms" +ts=2024-05-02T12:17:22.304337508Z caller=http.go:194 level=debug traceID=4fec2c76d860c46b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.886224ms" +ts=2024-05-02T12:17:22.304204642Z caller=http.go:194 level=debug traceID=2c9337308be8afb7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.304764ms" +ts=2024-05-02T12:17:22.301928444Z caller=http.go:194 level=debug traceID=195b5ca6b5f8bb7f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.444899ms" +ts=2024-05-02T12:17:22.301863838Z caller=http.go:194 level=debug 
traceID=50197d401fcf2cc6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.201796ms" +ts=2024-05-02T12:17:22.301533676Z caller=http.go:194 level=debug traceID=69e82009296e50c8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.510685ms" +ts=2024-05-02T12:17:22.301462364Z caller=http.go:194 level=debug traceID=577d067f6c58e048 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.031918ms" +ts=2024-05-02T12:17:22.299839999Z caller=http.go:194 level=debug traceID=6809066c0d6366c2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.950965ms" +ts=2024-05-02T12:17:22.299605496Z caller=http.go:194 level=debug traceID=21722f23be405b82 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.069603ms" +ts=2024-05-02T12:17:22.298607451Z caller=http.go:194 level=debug traceID=0acace3b7affe7e1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.671875ms" +ts=2024-05-02T12:17:22.298070298Z caller=http.go:194 level=debug traceID=0469ed7cf3b5ec11 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.707515ms" +ts=2024-05-02T12:17:22.297722806Z caller=http.go:194 level=debug traceID=52e689100fa7a595 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.884815ms" +ts=2024-05-02T12:17:22.29744368Z caller=http.go:194 level=debug traceID=207e9a1772bd219d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.136908ms" +ts=2024-05-02T12:17:22.297279638Z caller=http.go:194 level=debug traceID=2fbf8162520d21b4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.825079ms" +ts=2024-05-02T12:17:22.297305612Z caller=http.go:194 level=debug traceID=4a0783cdb749cd26 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.26235ms" +ts=2024-05-02T12:17:22.296873135Z caller=http.go:194 level=debug traceID=38823d7b7ef0a063 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.95288ms" +ts=2024-05-02T12:17:22.29627453Z caller=http.go:194 level=debug traceID=0212015dcd8db54d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.257045ms" +ts=2024-05-02T12:17:22.295799692Z caller=http.go:194 level=debug traceID=3aa5d9d146fe568d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.779076ms" +ts=2024-05-02T12:17:22.295322336Z caller=http.go:194 level=debug traceID=1b9bb92dd7002195 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.937488ms" +ts=2024-05-02T12:17:22.294742685Z caller=http.go:194 level=debug traceID=6bcd599518f976e2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.436195ms" +ts=2024-05-02T12:17:22.293565607Z caller=http.go:194 level=debug traceID=21ef0f6ce38b3f80 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.94269ms" +ts=2024-05-02T12:17:22.293339439Z caller=http.go:194 level=debug traceID=4fec2c76d860c46b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.990504ms" +ts=2024-05-02T12:17:22.292093346Z caller=http.go:194 level=debug traceID=1548cc61c100cae6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.879105ms" +ts=2024-05-02T12:17:22.2917934Z caller=http.go:194 level=debug traceID=22e67262170fa62c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.822006ms" +ts=2024-05-02T12:17:22.291552413Z caller=http.go:194 level=debug traceID=2fb8ca06ac50dbf1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.072128ms" +ts=2024-05-02T12:17:22.290241937Z caller=http.go:194 level=debug traceID=3de66bdad2f6a747 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.918802ms" +ts=2024-05-02T12:17:22.289807567Z caller=http.go:194 level=debug traceID=69e82009296e50c8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
2.033906ms" +ts=2024-05-02T12:17:22.287760311Z caller=http.go:194 level=debug traceID=6809066c0d6366c2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.798086ms" +ts=2024-05-02T12:17:22.287727033Z caller=http.go:194 level=debug traceID=4a0783cdb749cd26 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.674046ms" +ts=2024-05-02T12:17:22.287713022Z caller=http.go:194 level=debug traceID=21722f23be405b82 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.698237ms" +ts=2024-05-02T12:17:22.287665737Z caller=http.go:194 level=debug traceID=0acace3b7affe7e1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.795676ms" +ts=2024-05-02T12:17:22.28715361Z caller=http.go:194 level=debug traceID=0212015dcd8db54d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.518042ms" +ts=2024-05-02T12:17:22.286179505Z caller=http.go:194 level=debug traceID=38823d7b7ef0a063 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.847523ms" +ts=2024-05-02T12:17:22.286016535Z caller=http.go:194 level=debug traceID=207e9a1772bd219d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.048981ms" +ts=2024-05-02T12:17:22.285363127Z caller=http.go:194 level=debug traceID=0ee7a7c1526b4cde orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.034952ms" +ts=2024-05-02T12:17:22.282987875Z caller=http.go:194 level=debug traceID=253c1988e270df61 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.773252ms" +ts=2024-05-02T12:17:22.282989313Z caller=http.go:194 level=debug traceID=12e62b3bbe0e28ae orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.244864ms" +ts=2024-05-02T12:17:22.282343181Z caller=http.go:194 level=debug traceID=0a8fa86890f8b8a8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.481971ms" +ts=2024-05-02T12:17:22.281917762Z caller=http.go:194 level=debug traceID=22e67262170fa62c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.09531ms" +ts=2024-05-02T12:17:22.281774633Z caller=http.go:194 level=debug traceID=39b3bd8aff5c3be2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.351383ms" +ts=2024-05-02T12:17:22.281294502Z caller=http.go:194 level=debug traceID=1548cc61c100cae6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.377643ms" +ts=2024-05-02T12:17:22.279284053Z caller=http.go:194 level=debug traceID=2fb8ca06ac50dbf1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.676263ms" +ts=2024-05-02T12:17:22.279005692Z caller=http.go:194 level=debug traceID=3de66bdad2f6a747 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.950558ms" +ts=2024-05-02T12:17:22.278838831Z caller=http.go:194 level=debug traceID=7f31f04e7ced4022 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.404228ms" +ts=2024-05-02T12:17:22.277757365Z caller=http.go:194 level=debug traceID=2fae464425584966 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.603953ms" +ts=2024-05-02T12:17:22.277477083Z caller=http.go:194 level=debug traceID=1833fe7aa781eb6d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.815365ms" +ts=2024-05-02T12:17:22.274893021Z caller=http.go:194 level=debug traceID=4a1ec9ffb7106856 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.164484ms" +ts=2024-05-02T12:17:22.273327657Z caller=http.go:194 level=debug traceID=0ee7a7c1526b4cde orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.124387ms" +ts=2024-05-02T12:17:22.273171195Z caller=http.go:194 level=debug traceID=56378ad5e981cd4b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.551868ms" +ts=2024-05-02T12:17:22.272971969Z caller=http.go:194 level=debug 
traceID=55b543cdc11ffc37 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.947207ms" +ts=2024-05-02T12:17:22.272806052Z caller=http.go:194 level=debug traceID=253c1988e270df61 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.67423ms" +ts=2024-05-02T12:17:22.272570846Z caller=http.go:194 level=debug traceID=7f31f04e7ced4022 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 19.901363ms" +ts=2024-05-02T12:17:22.2714135Z caller=http.go:194 level=debug traceID=39b3bd8aff5c3be2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.965662ms" +ts=2024-05-02T12:17:22.271246789Z caller=http.go:194 level=debug traceID=0a8fa86890f8b8a8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.810075ms" +ts=2024-05-02T12:17:22.271285577Z caller=http.go:194 level=debug traceID=12e62b3bbe0e28ae orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.286791ms" +ts=2024-05-02T12:17:22.271125708Z caller=http.go:194 level=debug traceID=1edbb06a23a2f45e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.855971ms" +ts=2024-05-02T12:17:22.271052536Z caller=http.go:194 level=debug traceID=1cd03c645d3f9006 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.347056ms" +ts=2024-05-02T12:17:22.270135004Z caller=http.go:194 level=debug traceID=0eec21d415ec6fd1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.893041ms" +ts=2024-05-02T12:17:22.268694008Z caller=http.go:194 level=debug traceID=7893b025baf230c3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.186836ms" +ts=2024-05-02T12:17:22.26840278Z caller=http.go:194 level=debug traceID=5a4a3a70a8a23900 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.146198ms" +ts=2024-05-02T12:17:22.268268538Z caller=http.go:194 level=debug traceID=19ec1e2be0cdc57f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.417413ms" +ts=2024-05-02T12:17:22.268237093Z caller=http.go:194 level=debug traceID=06d494760e6d8389 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.087015ms" +ts=2024-05-02T12:17:22.267396452Z caller=http.go:194 level=debug traceID=1f2785f00a0f0f01 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.969549ms" +ts=2024-05-02T12:17:22.267145369Z caller=http.go:194 level=debug traceID=2fae464425584966 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.661125ms" +ts=2024-05-02T12:17:22.26649328Z caller=http.go:194 level=debug traceID=1833fe7aa781eb6d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.222523ms" +ts=2024-05-02T12:17:22.265749724Z caller=http.go:194 level=debug traceID=62dccc6bfc62c252 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.339734ms" +ts=2024-05-02T12:17:22.264498955Z caller=http.go:194 level=debug traceID=58d29a0dd0bac584 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.117289ms" +ts=2024-05-02T12:17:22.264009862Z caller=http.go:194 level=debug traceID=4a1ec9ffb7106856 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.540318ms" +ts=2024-05-02T12:17:22.263248485Z caller=http.go:194 level=debug traceID=56378ad5e981cd4b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.712041ms" +ts=2024-05-02T12:17:22.262264341Z caller=http.go:194 level=debug traceID=55b543cdc11ffc37 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.344625ms" +ts=2024-05-02T12:17:22.260804303Z caller=http.go:194 level=debug traceID=37481971fe211af4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.537409ms" +ts=2024-05-02T12:17:22.260579597Z caller=http.go:194 level=debug traceID=1cd03c645d3f9006 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
1.730264ms" +ts=2024-05-02T12:17:22.260476256Z caller=http.go:194 level=debug traceID=1edbb06a23a2f45e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.822182ms" +ts=2024-05-02T12:17:22.260252171Z caller=http.go:194 level=debug traceID=0eec21d415ec6fd1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.945154ms" +ts=2024-05-02T12:17:22.259503386Z caller=http.go:194 level=debug traceID=76400798b0aa5145 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.667071ms" +ts=2024-05-02T12:17:22.259152039Z caller=http.go:194 level=debug traceID=06d494760e6d8389 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.49976ms" +ts=2024-05-02T12:17:22.259320746Z caller=http.go:194 level=debug traceID=0bd53d59bb82a190 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.694953ms" +ts=2024-05-02T12:17:22.258596033Z caller=http.go:194 level=debug traceID=5eea7112e317aa99 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 48.254482ms" +ts=2024-05-02T12:17:22.258498863Z caller=http.go:194 level=debug traceID=10427a372e08c4ce orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.47711ms" +ts=2024-05-02T12:17:22.257681647Z caller=http.go:194 level=debug traceID=21237014afb8a343 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.767598ms" +ts=2024-05-02T12:17:22.257403564Z caller=http.go:194 level=debug traceID=19ec1e2be0cdc57f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.632954ms" +ts=2024-05-02T12:17:22.25701487Z caller=http.go:194 level=debug traceID=135bf90d99a8fa69 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.06337ms" +ts=2024-05-02T12:17:22.256796045Z caller=http.go:194 level=debug traceID=5a4a3a70a8a23900 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.832638ms" +ts=2024-05-02T12:17:22.256696496Z caller=http.go:194 level=debug traceID=2ed6442d04c6ceb0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 252.25µs" +ts=2024-05-02T12:17:22.256628225Z caller=http.go:194 level=debug traceID=1f2785f00a0f0f01 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.57812ms" +ts=2024-05-02T12:17:22.255697316Z caller=http.go:194 level=debug traceID=7893b025baf230c3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.107854ms" +ts=2024-05-02T12:17:22.254808073Z caller=http.go:194 level=debug traceID=52c71ec9454f4df6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.831528ms" +ts=2024-05-02T12:17:22.254048422Z caller=http.go:194 level=debug traceID=46639539ca052b15 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.414334ms" +ts=2024-05-02T12:17:22.253360841Z caller=http.go:194 level=debug traceID=58d29a0dd0bac584 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.240076ms" +ts=2024-05-02T12:17:22.253056669Z caller=http.go:194 level=debug traceID=62dccc6bfc62c252 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.216326ms" +ts=2024-05-02T12:17:22.252759123Z caller=http.go:194 level=debug traceID=0bafdc81dd73e1de orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.747971ms" +ts=2024-05-02T12:17:22.252788072Z caller=http.go:194 level=debug traceID=5a50da884e4330c5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.103578ms" +ts=2024-05-02T12:17:22.252525454Z caller=http.go:194 level=debug traceID=48318c1eb34656e9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.223674ms" +ts=2024-05-02T12:17:22.252317912Z caller=http.go:194 level=debug traceID=6e3f2ae5bc85bcef orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.704477ms" +ts=2024-05-02T12:17:22.252161765Z caller=http.go:194 level=debug 
traceID=681a8d805f5f1243 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.044282ms" +ts=2024-05-02T12:17:22.252083837Z caller=http.go:194 level=debug traceID=37481971fe211af4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.688189ms" +ts=2024-05-02T12:17:22.252061628Z caller=http.go:194 level=debug traceID=65651cb34dafe1bc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.718289ms" +ts=2024-05-02T12:17:22.250404676Z caller=http.go:194 level=debug traceID=31e28f3bb17c196d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.076264ms" +ts=2024-05-02T12:17:22.249836373Z caller=http.go:194 level=debug traceID=27e290173d6c2243 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.820613ms" +ts=2024-05-02T12:17:22.249744075Z caller=http.go:194 level=debug traceID=6cba3a72c3a04528 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.826742ms" +ts=2024-05-02T12:17:22.248391821Z caller=http.go:194 level=debug traceID=0bd53d59bb82a190 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.115041ms" +ts=2024-05-02T12:17:22.248160606Z caller=http.go:194 level=debug traceID=10427a372e08c4ce orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.95115ms" +ts=2024-05-02T12:17:22.248178281Z caller=http.go:194 level=debug traceID=6ec777e1f220b0f2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.612253ms" +ts=2024-05-02T12:17:22.247735997Z caller=http.go:194 level=debug traceID=76400798b0aa5145 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.167452ms" +ts=2024-05-02T12:17:22.247530905Z caller=http.go:194 level=debug traceID=21237014afb8a343 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.706978ms" +ts=2024-05-02T12:17:22.247410491Z caller=http.go:194 level=debug traceID=2ed6442d04c6ceb0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 944.37µs" +ts=2024-05-02T12:17:22.246775796Z caller=http.go:194 level=debug traceID=008c0e486e5e44ac orgID=75 msg="POST /ingest?aggregationType=&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=0&spyName=gospy&units=&until=1714652242232506798 (200) 4.531647ms" +ts=2024-05-02T12:17:22.245292105Z caller=http.go:194 level=debug traceID=135319461ac6bb36 orgID=75 msg="POST /ingest?aggregationType=&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=0&spyName=gospy&units=&until=1714652242232506798 (200) 3.214615ms" +ts=2024-05-02T12:17:22.245266079Z caller=http.go:194 level=debug traceID=71dd2f801edb388c orgID=75 msg="POST /ingest?aggregationType=&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=100&spyName=gospy&units=&until=1714652242232506798 (200) 2.813184ms" +ts=2024-05-02T12:17:22.244846471Z caller=http.go:194 level=debug traceID=6e3f2ae5bc85bcef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.69245ms" +ts=2024-05-02T12:17:22.244900743Z caller=http.go:194 level=debug traceID=135bf90d99a8fa69 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.636822ms" +ts=2024-05-02T12:17:22.244918037Z caller=http.go:194 level=debug traceID=52c71ec9454f4df6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.413285ms" +ts=2024-05-02T12:17:22.244855455Z caller=http.go:194 level=debug traceID=4fba10ed6526c104 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.077073ms" +ts=2024-05-02T12:17:22.243734635Z caller=http.go:194 level=debug traceID=14cf7b7af3efb470 orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 2.912375ms" +ts=2024-05-02T12:17:22.243660854Z caller=http.go:194 level=debug traceID=61b23eff06c17a64 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.303249ms" +ts=2024-05-02T12:17:22.243378528Z caller=http.go:194 level=debug traceID=46639539ca052b15 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.121409ms" +ts=2024-05-02T12:17:22.242343806Z caller=http.go:194 level=debug traceID=404c6a83a18e66a4 orgID=75 msg="POST /ingest?aggregationType=average&from=1714652227232613927&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=0&spyName=gospy&units=goroutines&until=1714652242232506798 (200) 2.902485ms" +ts=2024-05-02T12:17:22.242284131Z caller=http.go:194 level=debug traceID=094b63abeb694bd4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.849934ms" +ts=2024-05-02T12:17:22.242242947Z caller=http.go:194 level=debug traceID=48318c1eb34656e9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.927614ms" +ts=2024-05-02T12:17:22.242244235Z caller=http.go:194 level=debug traceID=5a50da884e4330c5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 16.633144ms" +ts=2024-05-02T12:17:22.241255085Z caller=http.go:194 level=debug traceID=3bdd9c78791cb9ce orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.495767ms" +ts=2024-05-02T12:17:22.240811493Z caller=http.go:194 level=debug traceID=0bafdc81dd73e1de orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.492358ms" +ts=2024-05-02T12:17:22.240285602Z caller=http.go:194 level=debug traceID=65651cb34dafe1bc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.413581ms" +ts=2024-05-02T12:17:22.239935934Z caller=http.go:194 level=debug traceID=6cba3a72c3a04528 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.05797ms" +ts=2024-05-02T12:17:22.239324Z caller=http.go:194 level=debug traceID=3432f09971721654 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.746625ms" +ts=2024-05-02T12:17:22.239203775Z caller=http.go:194 level=debug traceID=31e28f3bb17c196d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.061709ms" +ts=2024-05-02T12:17:22.238652107Z caller=http.go:194 level=debug traceID=681a8d805f5f1243 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.986382ms" +ts=2024-05-02T12:17:22.238321127Z caller=http.go:194 level=debug traceID=27e290173d6c2243 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.260369ms" +ts=2024-05-02T12:17:22.2374025Z caller=http.go:194 level=debug traceID=1f47402609789c7c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.907569ms" +ts=2024-05-02T12:17:22.237126994Z caller=http.go:194 level=debug traceID=1f4e2732852dc417 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.868764ms" +ts=2024-05-02T12:17:22.237115617Z caller=http.go:194 level=debug traceID=6ec777e1f220b0f2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.971594ms" +ts=2024-05-02T12:17:22.234830962Z caller=http.go:194 level=debug traceID=2211aff9bc8308d4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.40421ms" +ts=2024-05-02T12:17:22.233393571Z caller=http.go:194 level=debug traceID=4fba10ed6526c104 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.975188ms" +ts=2024-05-02T12:17:22.233315636Z caller=http.go:194 level=debug traceID=61b23eff06c17a64 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.146458ms" +ts=2024-05-02T12:17:22.232769863Z caller=http.go:194 level=debug traceID=0ad201363a3f30ef orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.41825ms" 
+ts=2024-05-02T12:17:22.232135205Z caller=http.go:194 level=debug traceID=14cf7b7af3efb470 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.907402ms" +ts=2024-05-02T12:17:22.231846501Z caller=http.go:194 level=debug traceID=3bdd9c78791cb9ce orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.265851ms" +ts=2024-05-02T12:17:22.231785105Z caller=http.go:194 level=debug traceID=6c088d67b44a80a7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 268.362µs" +ts=2024-05-02T12:17:22.231258006Z caller=http.go:194 level=debug traceID=094b63abeb694bd4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.874586ms" +ts=2024-05-02T12:17:22.230443761Z caller=http.go:194 level=debug traceID=5b9c6e0a2e9bdf52 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.825746ms" +ts=2024-05-02T12:17:22.230028052Z caller=http.go:194 level=debug traceID=2b2bb28ace6ac4e6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.029785ms" +ts=2024-05-02T12:17:22.229355258Z caller=http.go:194 level=debug traceID=3432f09971721654 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.025565ms" +ts=2024-05-02T12:17:22.229128916Z caller=http.go:194 level=debug traceID=7813160fbabe4f7b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.203937ms" +ts=2024-05-02T12:17:22.229184843Z caller=http.go:194 level=debug traceID=4dafacf3b5537099 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.269913ms" +ts=2024-05-02T12:17:22.228018337Z caller=http.go:194 level=debug traceID=6e56a603bb4cee39 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.872834ms" +ts=2024-05-02T12:17:22.227693581Z caller=http.go:194 level=debug traceID=4abcdb03009ffea4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 11.151559ms" +ts=2024-05-02T12:17:22.227391725Z caller=http.go:194 level=debug traceID=1f47402609789c7c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.837271ms" +ts=2024-05-02T12:17:22.226805873Z caller=http.go:194 level=debug traceID=0abe3d3570d6289e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.262796ms" +ts=2024-05-02T12:17:22.225856187Z caller=http.go:194 level=debug traceID=760b2b72edd55bd8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.77916ms" +ts=2024-05-02T12:17:22.224739311Z caller=http.go:194 level=debug traceID=1f4e2732852dc417 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.088637ms" +ts=2024-05-02T12:17:22.224499041Z caller=http.go:194 level=debug traceID=4dafacf3b5537099 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.204637ms" +ts=2024-05-02T12:17:22.224580657Z caller=http.go:194 level=debug traceID=065cf5481e2a1e23 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 45.975758ms" +ts=2024-05-02T12:17:22.223554253Z caller=http.go:194 level=debug traceID=2b2bb28ace6ac4e6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.712246ms" +ts=2024-05-02T12:17:22.223355303Z caller=http.go:194 level=debug traceID=3b6219f5a26a0fe8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.815597ms" +ts=2024-05-02T12:17:22.222985136Z caller=http.go:194 level=debug traceID=2211aff9bc8308d4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.096726ms" +ts=2024-05-02T12:17:22.222935288Z caller=http.go:194 level=debug traceID=1973fff37ecde24a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.136893ms" +ts=2024-05-02T12:17:22.22194318Z caller=http.go:194 level=debug traceID=0ad201363a3f30ef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.138166ms" +ts=2024-05-02T12:17:22.22062369Z caller=http.go:194 level=debug 
traceID=6c088d67b44a80a7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 326.296µs" +ts=2024-05-02T12:17:22.220244015Z caller=http.go:194 level=debug traceID=5f7a55613b6fbddd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.558877ms" +ts=2024-05-02T12:17:22.219621812Z caller=http.go:194 level=debug traceID=1d394e647625d40a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.472116ms" +ts=2024-05-02T12:17:22.219650508Z caller=http.go:194 level=debug traceID=5b9c6e0a2e9bdf52 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.573168ms" +ts=2024-05-02T12:17:22.218532051Z caller=http.go:194 level=debug traceID=7813160fbabe4f7b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.944241ms" +ts=2024-05-02T12:17:22.218231371Z caller=http.go:194 level=debug traceID=5eea7112e317aa99 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 20.918559ms" +ts=2024-05-02T12:17:22.218248006Z caller=http.go:194 level=debug traceID=3812f9ec02717985 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.944782ms" +ts=2024-05-02T12:17:22.217999491Z caller=http.go:194 level=debug traceID=306b04d70280fea3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 197.591µs" +ts=2024-05-02T12:17:22.217690431Z caller=http.go:194 level=debug traceID=6d5cb7f4eaf11850 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.246097ms" +ts=2024-05-02T12:17:22.217455353Z caller=http.go:194 level=debug traceID=31c6f5fa1fa36d98 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.065055ms" +ts=2024-05-02T12:17:22.217163986Z caller=http.go:194 level=debug traceID=70c1bebc99e4cb3e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 552.315µs" +ts=2024-05-02T12:17:22.216866847Z caller=http.go:194 level=debug traceID=2cd374ad25dc14f3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.578533ms" +ts=2024-05-02T12:17:22.216620145Z caller=http.go:194 level=debug traceID=6e56a603bb4cee39 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.689527ms" +ts=2024-05-02T12:17:22.216576357Z caller=http.go:194 level=debug traceID=40aa061a03ad6acf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.503105ms" +ts=2024-05-02T12:17:22.215555221Z caller=http.go:194 level=debug traceID=0abe3d3570d6289e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.714913ms" +ts=2024-05-02T12:17:22.215428501Z caller=http.go:194 level=debug traceID=37e024ca0f02a2d1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 44.865817ms" +ts=2024-05-02T12:17:22.215020427Z caller=http.go:194 level=debug traceID=760b2b72edd55bd8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.028465ms" +ts=2024-05-02T12:17:22.214633886Z caller=http.go:194 level=debug traceID=2053f0cedc4fa5ed orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.864768ms" +ts=2024-05-02T12:17:22.213579709Z caller=http.go:194 level=debug traceID=4abcdb03009ffea4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 8.901429ms" +ts=2024-05-02T12:17:22.213186751Z caller=http.go:194 level=debug traceID=034c961117a603df orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.530472ms" +ts=2024-05-02T12:17:22.212971554Z caller=http.go:194 level=debug traceID=3b6219f5a26a0fe8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.501431ms" +ts=2024-05-02T12:17:22.212755449Z caller=http.go:194 level=debug traceID=1973fff37ecde24a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.914313ms" +ts=2024-05-02T12:17:22.21053254Z caller=http.go:194 level=debug traceID=57e808c35c4f15ea orgID=3648 msg="POST /push.v1.PusherService/Push (200) 
2.518227ms" +ts=2024-05-02T12:17:22.20999515Z caller=http.go:194 level=debug traceID=412bfd170d1efb0c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.53929ms" +ts=2024-05-02T12:17:22.209691877Z caller=http.go:194 level=debug traceID=355ea9609e419edd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.144007ms" +ts=2024-05-02T12:17:22.209517627Z caller=http.go:194 level=debug traceID=5f7a55613b6fbddd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.53864ms" +ts=2024-05-02T12:17:22.209363872Z caller=http.go:194 level=debug traceID=7aa61c7097b87bf2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 301.732µs" +ts=2024-05-02T12:17:22.209378429Z caller=http.go:194 level=debug traceID=1d394e647625d40a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.29572ms" +ts=2024-05-02T12:17:22.208201281Z caller=http.go:194 level=debug traceID=306b04d70280fea3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 429.948µs" +ts=2024-05-02T12:17:22.208094835Z caller=http.go:194 level=debug traceID=4f852e94ec3ec6d5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.932056ms" +ts=2024-05-02T12:17:22.207384798Z caller=http.go:194 level=debug traceID=5d5dea8342b8b1ab orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.973712ms" +ts=2024-05-02T12:17:22.206613668Z caller=http.go:194 level=debug traceID=3812f9ec02717985 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.702842ms" +ts=2024-05-02T12:17:22.206413404Z caller=http.go:194 level=debug traceID=6d5cb7f4eaf11850 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.250618ms" +ts=2024-05-02T12:17:22.206477368Z caller=http.go:194 level=debug traceID=2cd374ad25dc14f3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.816595ms" +ts=2024-05-02T12:17:22.205898721Z caller=http.go:194 level=debug traceID=31c6f5fa1fa36d98 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.410091ms" +ts=2024-05-02T12:17:22.205768304Z caller=http.go:194 level=debug traceID=2044c44170f163bb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.923597ms" +ts=2024-05-02T12:17:22.205080044Z caller=http.go:194 level=debug traceID=70c1bebc99e4cb3e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 363.255µs" +ts=2024-05-02T12:17:22.204741519Z caller=http.go:194 level=debug traceID=40aa061a03ad6acf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.737091ms" +ts=2024-05-02T12:17:22.204600588Z caller=http.go:194 level=debug traceID=02b6ad3fb8eeef5f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.34802ms" +ts=2024-05-02T12:17:22.203529931Z caller=http.go:194 level=debug traceID=2053f0cedc4fa5ed orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.312074ms" +ts=2024-05-02T12:17:22.202539907Z caller=http.go:194 level=debug traceID=4b0b48dbe95a23b7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 319.624µs" +ts=2024-05-02T12:17:22.202057473Z caller=http.go:194 level=debug traceID=034c961117a603df orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.693514ms" +ts=2024-05-02T12:17:22.200767046Z caller=http.go:194 level=debug traceID=412bfd170d1efb0c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.551862ms" +ts=2024-05-02T12:17:22.200418089Z caller=http.go:194 level=debug traceID=4d24960e9e00663d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.533672ms" +ts=2024-05-02T12:17:22.200518453Z caller=http.go:194 level=debug traceID=6a70e4979fc28c31 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.224136ms" +ts=2024-05-02T12:17:22.200324642Z caller=http.go:194 level=debug 
traceID=0f51a7471e9f85c6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.307775ms" +ts=2024-05-02T12:17:22.199246766Z caller=http.go:194 level=debug traceID=355ea9609e419edd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.968505ms" +ts=2024-05-02T12:17:22.199125241Z caller=http.go:194 level=debug traceID=57e808c35c4f15ea orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.48767ms" +ts=2024-05-02T12:17:22.198989661Z caller=http.go:194 level=debug traceID=2044c44170f163bb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 18.540566ms" +ts=2024-05-02T12:17:22.198919745Z caller=http.go:194 level=debug traceID=7aa61c7097b87bf2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 356.489µs" +ts=2024-05-02T12:17:22.197473525Z caller=http.go:194 level=debug traceID=26729dac64a85a4d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.847397ms" +ts=2024-05-02T12:17:22.197346411Z caller=http.go:194 level=debug traceID=5d5dea8342b8b1ab orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.257683ms" +ts=2024-05-02T12:17:22.197019488Z caller=http.go:194 level=debug traceID=6a70e4979fc28c31 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.188566ms" +ts=2024-05-02T12:17:22.19636844Z caller=http.go:194 level=debug traceID=4f852e94ec3ec6d5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.318508ms" +ts=2024-05-02T12:17:22.196094734Z caller=http.go:194 level=debug traceID=36bdaf3c93d8a77c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.557923ms" +ts=2024-05-02T12:17:22.195185945Z caller=http.go:194 level=debug traceID=59fb659203c0c7a1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.715664ms" +ts=2024-05-02T12:17:22.195032532Z caller=http.go:194 level=debug traceID=37e024ca0f02a2d1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 35.619378ms" +ts=2024-05-02T12:17:22.193690476Z caller=http.go:194 level=debug traceID=2803d3f5a2b12bdd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.840664ms" +ts=2024-05-02T12:17:22.193814502Z caller=http.go:194 level=debug traceID=02b6ad3fb8eeef5f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.73021ms" +ts=2024-05-02T12:17:22.192073099Z caller=http.go:194 level=debug traceID=2606069628204526 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.564909ms" +ts=2024-05-02T12:17:22.191717324Z caller=http.go:194 level=debug traceID=472a6ff9612c6ed6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.015901ms" +ts=2024-05-02T12:17:22.191404809Z caller=http.go:194 level=debug traceID=11d78b344aa7763c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.823511ms" +ts=2024-05-02T12:17:22.191335089Z caller=http.go:194 level=debug traceID=21ad8cd957c5b6e9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.256658ms" +ts=2024-05-02T12:17:22.191013825Z caller=http.go:194 level=debug traceID=4b0b48dbe95a23b7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 461.177µs" +ts=2024-05-02T12:17:22.19060189Z caller=http.go:194 level=debug traceID=065cf5481e2a1e23 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 41.371737ms" +ts=2024-05-02T12:17:22.189133381Z caller=http.go:194 level=debug traceID=1b1992933c15d79c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.797742ms" +ts=2024-05-02T12:17:22.188695824Z caller=http.go:194 level=debug traceID=0f51a7471e9f85c6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.864615ms" +ts=2024-05-02T12:17:22.188487914Z caller=http.go:194 level=debug traceID=26729dac64a85a4d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
2.700326ms" +ts=2024-05-02T12:17:22.188308784Z caller=http.go:194 level=debug traceID=30165ce0055ecc1f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.584184ms" +ts=2024-05-02T12:17:22.187761577Z caller=http.go:194 level=debug traceID=4d24960e9e00663d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.959306ms" +ts=2024-05-02T12:17:22.18747688Z caller=http.go:194 level=debug traceID=7482869538938e3d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 19.145763ms" +ts=2024-05-02T12:17:22.186996355Z caller=http.go:194 level=debug traceID=36bdaf3c93d8a77c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.986533ms" +ts=2024-05-02T12:17:22.186204841Z caller=http.go:194 level=debug traceID=00273096f105e74d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 10.720467ms" +ts=2024-05-02T12:17:22.184928452Z caller=http.go:194 level=debug traceID=742a7bc42608e599 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.904446ms" +ts=2024-05-02T12:17:22.184527646Z caller=http.go:194 level=debug traceID=59fb659203c0c7a1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.933023ms" +ts=2024-05-02T12:17:22.184470889Z caller=http.go:194 level=debug traceID=259acc4340befa45 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.939283ms" +ts=2024-05-02T12:17:22.183504203Z caller=http.go:194 level=debug traceID=5306d29a624d7842 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 18.087018ms" +ts=2024-05-02T12:17:22.182825646Z caller=http.go:194 level=debug traceID=5306d29a624d7842 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.540276ms" +ts=2024-05-02T12:17:22.182692298Z caller=http.go:194 level=debug traceID=4332a27c2abee4f3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.805544ms" +ts=2024-05-02T12:17:22.182258351Z caller=http.go:194 level=debug traceID=3532312e04b56ffc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.575778ms" +ts=2024-05-02T12:17:22.181865726Z caller=http.go:194 level=debug traceID=07c501066b3502c8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.464132ms" +ts=2024-05-02T12:17:22.181460615Z caller=http.go:194 level=debug traceID=2803d3f5a2b12bdd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.085415ms" +ts=2024-05-02T12:17:22.181003083Z caller=http.go:194 level=debug traceID=2606069628204526 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.644886ms" +ts=2024-05-02T12:17:22.180689736Z caller=http.go:194 level=debug traceID=21ad8cd957c5b6e9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.978732ms" +ts=2024-05-02T12:17:22.179630592Z caller=http.go:194 level=debug traceID=11d78b344aa7763c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.232729ms" +ts=2024-05-02T12:17:22.178903873Z caller=http.go:194 level=debug traceID=472a6ff9612c6ed6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.481858ms" +ts=2024-05-02T12:17:22.178163522Z caller=http.go:194 level=debug traceID=55a53b8657f82549 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 13.615273ms" +ts=2024-05-02T12:17:22.177763204Z caller=http.go:194 level=debug traceID=30165ce0055ecc1f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.494458ms" +ts=2024-05-02T12:17:22.177632694Z caller=http.go:194 level=debug traceID=7b595e51124a616c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.85904ms" +ts=2024-05-02T12:17:22.176522186Z caller=http.go:194 level=debug traceID=1b1992933c15d79c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.855715ms" +ts=2024-05-02T12:17:22.175991856Z caller=http.go:194 
level=debug traceID=11edeba80c10cd8a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.251071ms" +ts=2024-05-02T12:17:22.174323552Z caller=http.go:194 level=debug traceID=12af8579bf78e932 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.792739ms" +ts=2024-05-02T12:17:22.1742277Z caller=http.go:194 level=debug traceID=00273096f105e74d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 25.77863ms" +ts=2024-05-02T12:17:22.173424302Z caller=http.go:194 level=debug traceID=4ed444d3cbffac51 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.109379ms" +ts=2024-05-02T12:17:22.173145084Z caller=http.go:194 level=debug traceID=259acc4340befa45 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.94143ms" +ts=2024-05-02T12:17:22.173148318Z caller=http.go:194 level=debug traceID=55a53b8657f82549 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 21.105285ms" +ts=2024-05-02T12:17:22.172913948Z caller=http.go:194 level=debug traceID=3532312e04b56ffc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.391461ms" +ts=2024-05-02T12:17:22.172623931Z caller=http.go:194 level=debug traceID=742a7bc42608e599 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.116126ms" +ts=2024-05-02T12:17:22.171597955Z caller=http.go:194 level=debug traceID=7482869538938e3d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.763014ms" +ts=2024-05-02T12:17:22.171296353Z caller=http.go:194 level=debug traceID=4332a27c2abee4f3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.925506ms" +ts=2024-05-02T12:17:22.170347713Z caller=http.go:194 level=debug traceID=07c501066b3502c8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.801486ms" +ts=2024-05-02T12:17:22.170166994Z caller=http.go:194 level=debug traceID=542f8922ac0b0f30 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.797602ms" +ts=2024-05-02T12:17:22.166618304Z caller=http.go:194 level=debug traceID=7b595e51124a616c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.345103ms" +ts=2024-05-02T12:17:22.165881771Z caller=http.go:194 level=debug traceID=2503f02f86d2bb1f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.504357ms" +ts=2024-05-02T12:17:22.165398864Z caller=http.go:194 level=debug traceID=33fb357be2d96130 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.013926ms" +ts=2024-05-02T12:17:22.16506165Z caller=http.go:194 level=debug traceID=156b4dc92be29be4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.723215ms" +ts=2024-05-02T12:17:22.164626296Z caller=http.go:194 level=debug traceID=11edeba80c10cd8a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.381837ms" +ts=2024-05-02T12:17:22.163705049Z caller=http.go:194 level=debug traceID=12af8579bf78e932 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.596603ms" +ts=2024-05-02T12:17:22.162524577Z caller=http.go:194 level=debug traceID=745cb0a6a1c5b6bf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.807718ms" +ts=2024-05-02T12:17:22.162242019Z caller=http.go:194 level=debug traceID=4ed444d3cbffac51 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.306374ms" +ts=2024-05-02T12:17:22.159101118Z caller=http.go:194 level=debug traceID=542f8922ac0b0f30 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.747833ms" +ts=2024-05-02T12:17:22.158256848Z caller=http.go:194 level=debug traceID=4df539314d458bc7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.810894ms" +ts=2024-05-02T12:17:22.157045042Z caller=http.go:194 level=debug traceID=6e955a3741ce1ba3 orgID=3648 msg="POST 
/push.v1.PusherService/Push (200) 1.884125ms" +ts=2024-05-02T12:17:22.156601413Z caller=http.go:194 level=debug traceID=40d6025b75ed18d0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.728568ms" +ts=2024-05-02T12:17:22.156253772Z caller=http.go:194 level=debug traceID=4cdbda577b5b5a3a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.716811ms" +ts=2024-05-02T12:17:22.155835504Z caller=http.go:194 level=debug traceID=4f295f77cbc337be orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.645044ms" +ts=2024-05-02T12:17:22.154918176Z caller=http.go:194 level=debug traceID=1a46eec5420eb782 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 297.835µs" +ts=2024-05-02T12:17:22.154392172Z caller=http.go:194 level=debug traceID=2503f02f86d2bb1f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.856305ms" +ts=2024-05-02T12:17:22.154110886Z caller=http.go:194 level=debug traceID=33fb357be2d96130 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.053643ms" +ts=2024-05-02T12:17:22.153663948Z caller=http.go:194 level=debug traceID=156b4dc92be29be4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.561666ms" +ts=2024-05-02T12:17:22.152607883Z caller=http.go:194 level=debug traceID=745cb0a6a1c5b6bf orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.986855ms" +ts=2024-05-02T12:17:22.15217776Z caller=http.go:194 level=debug traceID=7f08643fc4dc3f50 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.115093ms" +ts=2024-05-02T12:17:22.150709301Z caller=http.go:194 level=debug traceID=79c0bbd0e8a6ddac orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.025604ms" +ts=2024-05-02T12:17:22.148965651Z caller=http.go:194 level=debug traceID=0be6434bf8f1ebbd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.269988ms" +ts=2024-05-02T12:17:22.148427235Z caller=http.go:194 level=debug traceID=7385a8b9d6266a7e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.239018ms" +ts=2024-05-02T12:17:22.147254198Z caller=http.go:194 level=debug traceID=4df539314d458bc7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.223684ms" +ts=2024-05-02T12:17:22.146774197Z caller=http.go:194 level=debug traceID=4f295f77cbc337be orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.332742ms" +ts=2024-05-02T12:17:22.146138283Z caller=http.go:194 level=debug traceID=4cdbda577b5b5a3a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.589651ms" +ts=2024-05-02T12:17:22.145820696Z caller=http.go:194 level=debug traceID=6e955a3741ce1ba3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.762289ms" +ts=2024-05-02T12:17:22.145828355Z caller=http.go:194 level=debug traceID=20b7bc39fbb261ba orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.830321ms" +ts=2024-05-02T12:17:22.144763805Z caller=http.go:194 level=debug traceID=40d6025b75ed18d0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.44999ms" +ts=2024-05-02T12:17:22.143777873Z caller=http.go:194 level=debug traceID=1a46eec5420eb782 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 482.341µs" +ts=2024-05-02T12:17:22.143515684Z caller=http.go:194 level=debug traceID=401b11add46bccd9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.18348ms" +ts=2024-05-02T12:17:22.142483568Z caller=http.go:194 level=debug traceID=7385a8b9d6266a7e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 7.293917ms" +ts=2024-05-02T12:17:22.1416154Z caller=http.go:194 level=debug traceID=7f08643fc4dc3f50 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.289791ms" +ts=2024-05-02T12:17:22.139668959Z 
caller=http.go:194 level=debug traceID=2d6baec140a44dc7 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.280334ms" +ts=2024-05-02T12:17:22.139124706Z caller=http.go:194 level=debug traceID=79c0bbd0e8a6ddac orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.846512ms" +ts=2024-05-02T12:17:22.137246878Z caller=http.go:194 level=debug traceID=4b6b1696fbfbfdff orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.519335ms" +ts=2024-05-02T12:17:22.137094016Z caller=http.go:194 level=debug traceID=0be6434bf8f1ebbd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.194177ms" +ts=2024-05-02T12:17:22.136838448Z caller=http.go:194 level=debug traceID=2d6431d661f8be97 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.355439ms" +ts=2024-05-02T12:17:22.135335518Z caller=http.go:194 level=debug traceID=49e0f30df905865e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.828785ms" +ts=2024-05-02T12:17:22.135092504Z caller=http.go:194 level=debug traceID=20b7bc39fbb261ba orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.973965ms" +ts=2024-05-02T12:17:22.1333946Z caller=http.go:194 level=debug traceID=6f17871fb35abeb0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.946336ms" +ts=2024-05-02T12:17:22.131547783Z caller=http.go:194 level=debug traceID=72907ca8e06c48c4 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.157966ms" +ts=2024-05-02T12:17:22.130479029Z caller=http.go:194 level=debug traceID=401b11add46bccd9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.646653ms" +ts=2024-05-02T12:17:22.127765686Z caller=http.go:194 level=debug traceID=12ca3124734654a9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.952722ms" +ts=2024-05-02T12:17:22.127500589Z caller=http.go:194 level=debug traceID=2d6baec140a44dc7 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.856603ms" +ts=2024-05-02T12:17:22.127472095Z caller=http.go:194 level=debug traceID=04d9eaf21e643ef2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.464504ms" +ts=2024-05-02T12:17:22.126372471Z caller=http.go:194 level=debug traceID=3e7d451130b59c02 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.848425ms" +ts=2024-05-02T12:17:22.125588793Z caller=http.go:194 level=debug traceID=2d6431d661f8be97 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.747884ms" +ts=2024-05-02T12:17:22.125414843Z caller=http.go:194 level=debug traceID=4b6b1696fbfbfdff orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.952144ms" +ts=2024-05-02T12:17:22.124965117Z caller=http.go:194 level=debug traceID=37f2618b6b492c0c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.700976ms" +ts=2024-05-02T12:17:22.124958709Z caller=http.go:194 level=debug traceID=7205679ab5a4fe28 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.413599ms" +ts=2024-05-02T12:17:22.124097416Z caller=http.go:194 level=debug traceID=49e0f30df905865e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.969293ms" +ts=2024-05-02T12:17:22.122499679Z caller=http.go:194 level=debug traceID=1024fa274746d134 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.285288ms" +ts=2024-05-02T12:17:22.122277139Z caller=http.go:194 level=debug traceID=7294c0e738006923 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.873623ms" +ts=2024-05-02T12:17:22.121782121Z caller=http.go:194 level=debug traceID=42664ba7a140d9a9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.159613ms" +ts=2024-05-02T12:17:22.121698073Z caller=http.go:194 level=debug traceID=6f17871fb35abeb0 orgID=1218 msg="POST 
/push.v1.PusherService/Push (200) 2.848519ms" +ts=2024-05-02T12:17:22.120328808Z caller=http.go:194 level=debug traceID=72907ca8e06c48c4 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.201074ms" +ts=2024-05-02T12:17:22.118264308Z caller=http.go:194 level=debug traceID=5e245dbb1ebf242e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.404902ms" +ts=2024-05-02T12:17:22.11677276Z caller=http.go:194 level=debug traceID=12ca3124734654a9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.248879ms" +ts=2024-05-02T12:17:22.116752211Z caller=http.go:194 level=debug traceID=04d9eaf21e643ef2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.300338ms" +ts=2024-05-02T12:17:22.116578583Z caller=http.go:194 level=debug traceID=1d0e86a63c708d49 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.699444ms" +ts=2024-05-02T12:17:22.115976614Z caller=http.go:194 level=debug traceID=7205679ab5a4fe28 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.742738ms" +ts=2024-05-02T12:17:22.115385619Z caller=http.go:194 level=debug traceID=7836a12bb7f1964e orgID=75 msg="POST /ingest?aggregationType=sum&from=1714652227107641016&name=checkoutservice%7B__session_id__%3D294b9729f5a7de95%2Cnamespace%3Dotel-demo%7D&sampleRate=100&spyName=gospy&units=samples&until=1714652242109516917 (200) 1.562143ms" +ts=2024-05-02T12:17:22.115149651Z caller=http.go:194 level=debug traceID=31f184b579516107 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.973802ms" +ts=2024-05-02T12:17:22.114613675Z caller=http.go:194 level=debug traceID=3e7d451130b59c02 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.614436ms" +ts=2024-05-02T12:17:22.114399585Z caller=http.go:194 level=debug traceID=37f2618b6b492c0c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.092954ms" +ts=2024-05-02T12:17:22.111532908Z caller=http.go:194 level=debug traceID=7294c0e738006923 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.290905ms" +ts=2024-05-02T12:17:22.111195884Z caller=http.go:194 level=debug traceID=7fb88983292c7ab6 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 6.316187ms" +ts=2024-05-02T12:17:22.10976706Z caller=http.go:194 level=debug traceID=42664ba7a140d9a9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.968726ms" +ts=2024-05-02T12:17:22.109548363Z caller=http.go:194 level=debug traceID=0df4f540e69d2f98 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.178183ms" +ts=2024-05-02T12:17:22.109263657Z caller=http.go:194 level=debug traceID=1024fa274746d134 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.695728ms" +ts=2024-05-02T12:17:22.107970048Z caller=http.go:194 level=debug traceID=67a901a9a33458cc orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.937496ms" +ts=2024-05-02T12:17:22.107323815Z caller=http.go:194 level=debug traceID=5e245dbb1ebf242e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.003624ms" +ts=2024-05-02T12:17:22.10734987Z caller=http.go:194 level=debug traceID=343d1184ed7f703d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.860843ms" +ts=2024-05-02T12:17:22.107258186Z caller=http.go:194 level=debug traceID=607c0f6dda78f471 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.391098ms" +ts=2024-05-02T12:17:22.10659638Z caller=http.go:194 level=debug traceID=10992046ec13e27f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.618528ms" +ts=2024-05-02T12:17:22.105248146Z caller=http.go:194 level=debug traceID=1d0e86a63c708d49 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.762168ms" 
+ts=2024-05-02T12:17:22.103765056Z caller=http.go:194 level=debug traceID=49d90ffdcdd3d878 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.563985ms" +ts=2024-05-02T12:17:22.103323029Z caller=http.go:194 level=debug traceID=7fb88983292c7ab6 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 9.010589ms" +ts=2024-05-02T12:17:22.102552916Z caller=http.go:194 level=debug traceID=02960514bba0eae8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 16.228342ms" +ts=2024-05-02T12:17:22.101083812Z caller=http.go:194 level=debug traceID=02960514bba0eae8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.019049ms" +ts=2024-05-02T12:17:22.10104372Z caller=http.go:194 level=debug traceID=0df4f540e69d2f98 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.87214ms" +ts=2024-05-02T12:17:22.099667894Z caller=http.go:194 level=debug traceID=02ecc40e7808e37b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.343752ms" +ts=2024-05-02T12:17:22.099493873Z caller=http.go:194 level=debug traceID=343d1184ed7f703d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.099919ms" +ts=2024-05-02T12:17:22.099437211Z caller=http.go:194 level=debug traceID=31f184b579516107 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 11.662937ms" +ts=2024-05-02T12:17:22.09889555Z caller=http.go:194 level=debug traceID=607c0f6dda78f471 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 13.833766ms" +ts=2024-05-02T12:17:22.098474543Z caller=http.go:194 level=debug traceID=67a901a9a33458cc orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.504592ms" +ts=2024-05-02T12:17:22.097788001Z caller=http.go:194 level=debug traceID=76a45dc9285e8419 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.058223ms" +ts=2024-05-02T12:17:22.096374681Z caller=http.go:194 level=debug traceID=10992046ec13e27f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.375971ms" +ts=2024-05-02T12:17:22.09587514Z caller=http.go:194 level=debug traceID=5b37cdfed73dff9c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.623935ms" +ts=2024-05-02T12:17:22.09487247Z caller=http.go:194 level=debug traceID=42c1366a3fd2ee71 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 252.535µs" +ts=2024-05-02T12:17:22.093771906Z caller=http.go:194 level=debug traceID=49d90ffdcdd3d878 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.211302ms" +ts=2024-05-02T12:17:22.09274606Z caller=http.go:194 level=debug traceID=370782747c0339aa orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.543825ms" +ts=2024-05-02T12:17:22.09234609Z caller=http.go:194 level=debug traceID=6d4d53a6eb8ea3b9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.350164ms" +ts=2024-05-02T12:17:22.091893868Z caller=http.go:194 level=debug traceID=5f0abbecb4dba68a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.769724ms" +ts=2024-05-02T12:17:22.091114886Z caller=http.go:194 level=debug traceID=00fb2b7e05a875df orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.222979ms" +ts=2024-05-02T12:17:22.090199136Z caller=http.go:194 level=debug traceID=41caa1a6655fa14d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 490.221µs" +ts=2024-05-02T12:17:22.089624685Z caller=http.go:194 level=debug traceID=02ecc40e7808e37b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.419078ms" +ts=2024-05-02T12:17:22.08714105Z caller=http.go:194 level=debug traceID=76a45dc9285e8419 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.919236ms" +ts=2024-05-02T12:17:22.085819712Z caller=http.go:194 level=debug 
traceID=47089c6a3b914d52 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.752126ms" +ts=2024-05-02T12:17:22.084609154Z caller=http.go:194 level=debug traceID=5b37cdfed73dff9c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.005815ms" +ts=2024-05-02T12:17:22.083873401Z caller=http.go:194 level=debug traceID=42c1366a3fd2ee71 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 509.484µs" +ts=2024-05-02T12:17:22.083850136Z caller=http.go:194 level=debug traceID=29cd82f8ad0533c1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 9.862563ms" +ts=2024-05-02T12:17:22.083400077Z caller=http.go:194 level=debug traceID=08f8efba8bad3648 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.935921ms" +ts=2024-05-02T12:17:22.082381033Z caller=http.go:194 level=debug traceID=1a4a0b5d431dafff orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.941099ms" +ts=2024-05-02T12:17:22.082109464Z caller=http.go:194 level=debug traceID=370782747c0339aa orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.214705ms" +ts=2024-05-02T12:17:22.081759684Z caller=http.go:194 level=debug traceID=6d4d53a6eb8ea3b9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.736305ms" +ts=2024-05-02T12:17:22.081434894Z caller=http.go:194 level=debug traceID=5f0abbecb4dba68a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.269777ms" +ts=2024-05-02T12:17:22.081433522Z caller=http.go:194 level=debug traceID=7c72aa5cd32d1fd9 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.730905ms" +ts=2024-05-02T12:17:22.081006558Z caller=http.go:194 level=debug traceID=7a0225f151760cd3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.416154ms" +ts=2024-05-02T12:17:22.080714654Z caller=http.go:194 level=debug traceID=3b3e3119a97d69e1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.730472ms" +ts=2024-05-02T12:17:22.079938958Z caller=http.go:194 level=debug traceID=00fb2b7e05a875df orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.763043ms" +ts=2024-05-02T12:17:22.079922042Z caller=http.go:194 level=debug traceID=35c6bd16739a5c59 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.107805ms" +ts=2024-05-02T12:17:22.078894252Z caller=http.go:194 level=debug traceID=5d15b6e5ae1489fd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.036532ms" +ts=2024-05-02T12:17:22.078008233Z caller=http.go:194 level=debug traceID=41caa1a6655fa14d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 333.622µs" +ts=2024-05-02T12:17:22.077709164Z caller=http.go:194 level=debug traceID=6405ed5eb3f6787a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.760575ms" +ts=2024-05-02T12:17:22.077354841Z caller=http.go:194 level=debug traceID=364842290b2482da orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.64217ms" +ts=2024-05-02T12:17:22.076415005Z caller=http.go:194 level=debug traceID=4e79b29418434eb0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.304905ms" +ts=2024-05-02T12:17:22.074968989Z caller=http.go:194 level=debug traceID=2b2afef5997107e0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 309.792µs" +ts=2024-05-02T12:17:22.074742865Z caller=http.go:194 level=debug traceID=47089c6a3b914d52 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.748746ms" +ts=2024-05-02T12:17:22.07405284Z caller=http.go:194 level=debug traceID=6d06be610350b7e1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 272.694µs" +ts=2024-05-02T12:17:22.073705046Z caller=http.go:194 level=debug traceID=7c72aa5cd32d1fd9 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
10.27639ms" +ts=2024-05-02T12:17:22.072574642Z caller=http.go:194 level=debug traceID=08f8efba8bad3648 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.271552ms" +ts=2024-05-02T12:17:22.072229834Z caller=http.go:194 level=debug traceID=29cd82f8ad0533c1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 17.365643ms" +ts=2024-05-02T12:17:22.071914212Z caller=http.go:194 level=debug traceID=6351d806d9808f9e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.549942ms" +ts=2024-05-02T12:17:22.071347474Z caller=http.go:194 level=debug traceID=7a0225f151760cd3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.098241ms" +ts=2024-05-02T12:17:22.071201745Z caller=http.go:194 level=debug traceID=6fb85ecf184c5d08 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.034795ms" +ts=2024-05-02T12:17:22.070911419Z caller=http.go:194 level=debug traceID=02585fb762f098cd orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.011636ms" +ts=2024-05-02T12:17:22.070668131Z caller=http.go:194 level=debug traceID=1a4a0b5d431dafff orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.63855ms" +ts=2024-05-02T12:17:22.07033402Z caller=http.go:194 level=debug traceID=35c6bd16739a5c59 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.608757ms" +ts=2024-05-02T12:17:22.068170494Z caller=http.go:194 level=debug traceID=5d2fd46f4994d874 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.445605ms" +ts=2024-05-02T12:17:22.067811219Z caller=http.go:194 level=debug traceID=04a62aaaec104a8d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.898879ms" +ts=2024-05-02T12:17:22.067598981Z caller=http.go:194 level=debug traceID=364842290b2482da orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.932753ms" +ts=2024-05-02T12:17:22.06747275Z caller=http.go:194 level=debug traceID=5d15b6e5ae1489fd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.816844ms" +ts=2024-05-02T12:17:22.065696515Z caller=http.go:194 level=debug traceID=4e79b29418434eb0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.056024ms" +ts=2024-05-02T12:17:22.065626659Z caller=http.go:194 level=debug traceID=6405ed5eb3f6787a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.04079ms" +ts=2024-05-02T12:17:22.064891777Z caller=http.go:194 level=debug traceID=012b25b4edcda361 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.398285ms" +ts=2024-05-02T12:17:22.064688416Z caller=http.go:194 level=debug traceID=3b3e3119a97d69e1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.563877ms" +ts=2024-05-02T12:17:22.064546758Z caller=http.go:194 level=debug traceID=31ea974d5c245b06 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 8.640468ms" +ts=2024-05-02T12:17:22.064134543Z caller=http.go:194 level=debug traceID=2b2afef5997107e0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 737.032µs" +ts=2024-05-02T12:17:22.063066243Z caller=http.go:194 level=debug traceID=00cea412956397de orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.760236ms" +ts=2024-05-02T12:17:22.062721153Z caller=http.go:194 level=debug traceID=6d06be610350b7e1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 365.146µs" +ts=2024-05-02T12:17:22.061150918Z caller=http.go:194 level=debug traceID=2d01b47e94cfa112 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.35283ms" +ts=2024-05-02T12:17:22.060336789Z caller=http.go:194 level=debug traceID=6fb85ecf184c5d08 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.988661ms" +ts=2024-05-02T12:17:22.059943792Z caller=http.go:194 level=debug 
traceID=02585fb762f098cd orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.791632ms" +ts=2024-05-02T12:17:22.059807221Z caller=http.go:194 level=debug traceID=6351d806d9808f9e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.963509ms" +ts=2024-05-02T12:17:22.059653145Z caller=http.go:194 level=debug traceID=0413ed554cbf805d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.198586ms" +ts=2024-05-02T12:17:22.059066684Z caller=http.go:194 level=debug traceID=290aabb9c069686b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 14.499357ms" +ts=2024-05-02T12:17:22.058474386Z caller=http.go:194 level=debug traceID=185568dc06f0a553 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.505539ms" +ts=2024-05-02T12:17:22.058345935Z caller=http.go:194 level=debug traceID=041a1a2174da339c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 51.298022ms" +ts=2024-05-02T12:17:22.057506532Z caller=http.go:194 level=debug traceID=5d2fd46f4994d874 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.184483ms" +ts=2024-05-02T12:17:22.055829051Z caller=http.go:194 level=debug traceID=23d3838a4d9d1ebf orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.844817ms" +ts=2024-05-02T12:17:22.055745419Z caller=http.go:194 level=debug traceID=04a62aaaec104a8d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.329068ms" +ts=2024-05-02T12:17:22.05412588Z caller=http.go:194 level=debug traceID=31ea974d5c245b06 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 10.443604ms" +ts=2024-05-02T12:17:22.054029031Z caller=http.go:194 level=debug traceID=012b25b4edcda361 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.872111ms" +ts=2024-05-02T12:17:22.053350734Z caller=http.go:194 level=debug traceID=59e036919e18711b orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.520247ms" +ts=2024-05-02T12:17:22.052338897Z caller=http.go:194 level=debug traceID=14f33b6829c06a3a orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.071648ms" +ts=2024-05-02T12:17:22.051667036Z caller=http.go:194 level=debug traceID=00cea412956397de orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.790269ms" +ts=2024-05-02T12:17:22.051464439Z caller=http.go:194 level=debug traceID=20a89143553dcaa8 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.3474ms" +ts=2024-05-02T12:17:22.049812545Z caller=http.go:194 level=debug traceID=78ac7a767a31d1fb orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.189489ms" +ts=2024-05-02T12:17:22.048922857Z caller=http.go:194 level=debug traceID=2b1bce54db1ef7ea orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.578413ms" +ts=2024-05-02T12:17:22.047745414Z caller=http.go:194 level=debug traceID=0413ed554cbf805d orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.748952ms" +ts=2024-05-02T12:17:22.047231254Z caller=http.go:194 level=debug traceID=21501ae4d526df05 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.803055ms" +ts=2024-05-02T12:17:22.047121946Z caller=http.go:194 level=debug traceID=290aabb9c069686b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 14.99953ms" +ts=2024-05-02T12:17:22.046995855Z caller=http.go:194 level=debug traceID=185568dc06f0a553 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.566379ms" +ts=2024-05-02T12:17:22.046949718Z caller=http.go:194 level=debug traceID=2d01b47e94cfa112 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.879019ms" +ts=2024-05-02T12:17:22.044617635Z caller=http.go:194 level=debug traceID=23d3838a4d9d1ebf orgID=1218 msg="POST /push.v1.PusherService/Push 
(200) 1.535017ms" +ts=2024-05-02T12:17:22.043746018Z caller=http.go:194 level=debug traceID=59e036919e18711b orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.111929ms" +ts=2024-05-02T12:17:22.043514171Z caller=http.go:194 level=debug traceID=75ff94029073a264 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 5.610421ms" +ts=2024-05-02T12:17:22.042281534Z caller=http.go:194 level=debug traceID=14f33b6829c06a3a orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.059245ms" +ts=2024-05-02T12:17:22.04157056Z caller=http.go:194 level=debug traceID=25f440d723c8d734 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.725673ms" +ts=2024-05-02T12:17:22.041536571Z caller=http.go:194 level=debug traceID=2ce1553c5837d08f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 269.546µs" +ts=2024-05-02T12:17:22.041215048Z caller=http.go:194 level=debug traceID=0bd085ccaf61a16f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.123699ms" +ts=2024-05-02T12:17:22.040352145Z caller=http.go:194 level=debug traceID=78ac7a767a31d1fb orgID=1218 msg="POST /push.v1.PusherService/Push (200) 3.982894ms" +ts=2024-05-02T12:17:22.04032489Z caller=http.go:194 level=debug traceID=2153f256e4f92641 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.63601ms" +ts=2024-05-02T12:17:22.040245688Z caller=http.go:194 level=debug traceID=1e0bde37b4e5182e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.425066ms" +ts=2024-05-02T12:17:22.039705766Z caller=http.go:194 level=debug traceID=1b199f4e83f34fef orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.134103ms" +ts=2024-05-02T12:17:22.03957216Z caller=http.go:194 level=debug traceID=20a89143553dcaa8 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.649297ms" +ts=2024-05-02T12:17:22.039293842Z caller=http.go:194 level=debug traceID=34dca646ac0a0771 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.156629ms" +ts=2024-05-02T12:17:22.03852856Z caller=http.go:194 level=debug traceID=2b1bce54db1ef7ea orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.110279ms" +ts=2024-05-02T12:17:22.037791059Z caller=http.go:194 level=debug traceID=21501ae4d526df05 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.346112ms" +ts=2024-05-02T12:17:22.034490549Z caller=http.go:194 level=debug traceID=1b199f4e83f34fef orgID=1218 msg="POST /push.v1.PusherService/Push (200) 6.229016ms" +ts=2024-05-02T12:17:22.032039986Z caller=http.go:194 level=debug traceID=0b5ff8e4ff4512f2 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.29181ms" +ts=2024-05-02T12:17:22.031860037Z caller=http.go:194 level=debug traceID=0d06eb315cfa01e3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.867247ms" +ts=2024-05-02T12:17:22.031421436Z caller=http.go:194 level=debug traceID=2153f256e4f92641 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.727351ms" +ts=2024-05-02T12:17:22.03133828Z caller=http.go:194 level=debug traceID=561f4cd490f774b3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 4.962989ms" +ts=2024-05-02T12:17:22.031066318Z caller=http.go:194 level=debug traceID=75ff94029073a264 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.5623ms" +ts=2024-05-02T12:17:22.030366152Z caller=http.go:194 level=debug traceID=4401c5ac4b0c91e1 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 12.536493ms" +ts=2024-05-02T12:17:22.030301053Z caller=http.go:194 level=debug traceID=3a798995b4393d4f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.492842ms" +ts=2024-05-02T12:17:22.030144074Z caller=http.go:194 level=debug 
traceID=25f440d723c8d734 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.839959ms" +ts=2024-05-02T12:17:22.029773783Z caller=http.go:194 level=debug traceID=2ce1553c5837d08f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 313.215µs" +ts=2024-05-02T12:17:22.029609992Z caller=http.go:194 level=debug traceID=1e0bde37b4e5182e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.520082ms" +ts=2024-05-02T12:17:22.029367281Z caller=http.go:194 level=debug traceID=08667da127dd35b0 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.819749ms" +ts=2024-05-02T12:17:22.028727666Z caller=http.go:194 level=debug traceID=0bd085ccaf61a16f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.491564ms" +ts=2024-05-02T12:17:22.028686624Z caller=http.go:194 level=debug traceID=1056f086e6477b60 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.82615ms" +ts=2024-05-02T12:17:22.02864845Z caller=http.go:194 level=debug traceID=34dca646ac0a0771 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.696411ms" +ts=2024-05-02T12:17:22.02709282Z caller=http.go:194 level=debug traceID=13b7025a333d3555 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.333569ms" +ts=2024-05-02T12:17:22.026589171Z caller=http.go:194 level=debug traceID=041a1a2174da339c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 29.903464ms" +ts=2024-05-02T12:17:22.026216612Z caller=http.go:194 level=debug traceID=557199439d6690f5 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.076725ms" +ts=2024-05-02T12:17:22.026053855Z caller=http.go:194 level=debug traceID=1de7dd700ce57931 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.615892ms" +ts=2024-05-02T12:17:22.025451175Z caller=http.go:194 level=debug traceID=08667da127dd35b0 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.222814ms" +ts=2024-05-02T12:17:22.024018258Z caller=http.go:194 level=debug traceID=4c94b12308d8c27c orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.311869ms" +ts=2024-05-02T12:17:22.023906125Z caller=http.go:194 level=debug traceID=376b50d849f1a594 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.743426ms" +ts=2024-05-02T12:17:22.023510796Z caller=http.go:194 level=debug traceID=00545d5c1acae94e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.557341ms" +ts=2024-05-02T12:17:22.023187765Z caller=http.go:194 level=debug traceID=4401c5ac4b0c91e1 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 16.196785ms" +ts=2024-05-02T12:17:22.022124115Z caller=http.go:194 level=debug traceID=72fdd33f7dc5057e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 1.386592ms" +ts=2024-05-02T12:17:22.021147182Z caller=http.go:194 level=debug traceID=561f4cd490f774b3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.462178ms" +ts=2024-05-02T12:17:22.021115613Z caller=http.go:194 level=debug traceID=2d69ca20de57e260 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 7.04173ms" +ts=2024-05-02T12:17:22.021060342Z caller=http.go:194 level=debug traceID=0b5ff8e4ff4512f2 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.374969ms" +ts=2024-05-02T12:17:22.020741693Z caller=http.go:194 level=debug traceID=3a798995b4393d4f orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.875493ms" +ts=2024-05-02T12:17:22.020505836Z caller=http.go:194 level=debug traceID=0d06eb315cfa01e3 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.092607ms" +ts=2024-05-02T12:17:22.018589487Z caller=http.go:194 level=debug traceID=1056f086e6477b60 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 
3.720071ms" +ts=2024-05-02T12:17:22.017482038Z caller=http.go:194 level=debug traceID=13b7025a333d3555 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 5.513252ms" +ts=2024-05-02T12:17:22.01714251Z caller=http.go:194 level=debug traceID=259b8e7d5941d032 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.783995ms" +ts=2024-05-02T12:17:22.016801632Z caller=http.go:194 level=debug traceID=557199439d6690f5 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.634782ms" +ts=2024-05-02T12:17:22.015981722Z caller=http.go:194 level=debug traceID=25d360c4297645b3 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.234325ms" +ts=2024-05-02T12:17:22.015889736Z caller=http.go:194 level=debug traceID=0a3c6b02b2158a0d orgID=3648 msg="POST /push.v1.PusherService/Push (200) 59.123501ms" +ts=2024-05-02T12:17:22.015481869Z caller=http.go:194 level=debug traceID=1de7dd700ce57931 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 4.198598ms" +ts=2024-05-02T12:17:22.014595081Z caller=http.go:194 level=debug traceID=3c8680f60a9c062f orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.167727ms" +ts=2024-05-02T12:17:22.014345524Z caller=http.go:194 level=debug traceID=6dd82416add7aa01 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.701507ms" +ts=2024-05-02T12:17:22.013456056Z caller=http.go:194 level=debug traceID=4b3553746a16085e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 3.814718ms" +ts=2024-05-02T12:17:22.013303908Z caller=http.go:194 level=debug traceID=768bdad205b95633 orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.006051ms" +ts=2024-05-02T12:17:22.013228831Z caller=http.go:194 level=debug traceID=4c94b12308d8c27c orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.757687ms" +ts=2024-05-02T12:17:22.012587207Z caller=http.go:194 level=debug traceID=376b50d849f1a594 orgID=1218 msg="POST /push.v1.PusherService/Push (200) 2.105001ms" +ts=2024-05-02T12:17:22.012263091Z caller=http.go:194 level=debug traceID=4c0783390f6a589e orgID=3648 msg="POST /push.v1.PusherService/Push (200) 2.420053ms" +ts=2024-05-02T12:17:22.01212206Z caller=http.go:194 level=debug traceID=00545d5c1acae94e orgID=1218 msg="POST /push.v1.PusherService/Push (200) 1.604732ms" \ No newline at end of file diff --git a/pkg/pattern/drain/testdata/journald.txt b/pkg/pattern/drain/testdata/journald.txt new file mode 100644 index 0000000000000..8bf1f9aee644c --- /dev/null +++ b/pkg/pattern/drain/testdata/journald.txt @@ -0,0 +1,1000 @@ +I0507 11:59:44.523502 4727 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x82q" status="Running" +XMT: Solicit on eth0, interval 117950ms. 
+time="2024-05-07T11:59:43.954655670Z" level=info msg="StartContainer for \"93fa5decd62691912f90c9b27526f5e00183239bfa4d3f4ea8578a7873b9c2b4\"" +time="2024-05-07T11:59:43.954289531Z" level=info msg="CreateContainer within sandbox \"ee9dc07bca79ef7dffe2a6eb326e27236e9e97c35913c7aae16ee0a62632fc25\" for &ContainerMetadata{Name:cortex-gw,Attempt:1660,} returns container id \"93fa5decd62691912f90c9b27526f5e00183239bfa4d3f4ea8578a7873b9c2b4\"" +time="2024-05-07T11:59:43.941729092Z" level=info msg="CreateContainer within sandbox \"ee9dc07bca79ef7dffe2a6eb326e27236e9e97c35913c7aae16ee0a62632fc25\" for container &ContainerMetadata{Name:cortex-gw,Attempt:1660,}" +I0507 11:59:43.939053 3659 scope.go:117] "RemoveContainer" containerID="9940112c30fda42aa2b814faddfc969d9a2328ae70ecb9b858d75aa6f8b61483" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36674 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36674 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36674 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:43.923954 4643 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-j7jh6_hosted-grafana(83fb0f38-728e-4050-9500-6ac9fc9f21c8)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-j7jh6" podUID="83fb0f38-728e-4050-9500-6ac9fc9f21c8" +I0507 11:59:43.923422 4643 scope.go:117] "RemoveContainer" containerID="a85b6a771be0a2165463617e0c7a4f5b42dbb5c232c57166f32a72d969a25bf1" +I0507 11:59:43.910438 4624 kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" pod="hosted-grafana/ephemeral1511182183108soniaag-grafana-66698879db-52td7" +E0507 11:59:43.910202 4624 prober.go:239] "Unable to write all bytes from execInContainer" err="short write" expectedBytes=12133 actualBytes=10240 +I0507 11:59:43.794276 4775 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nvpf" status="Running" +E0507 11:59:43.743153 4601 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-5bb9765dd8-ktf2b_hosted-grafana(e8405a93-3a4c-4074-909d-661219c1f849)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-5bb9765dd8-ktf2b" podUID="e8405a93-3a4c-4074-909d-661219c1f849" +I0507 11:59:43.742649 4601 scope.go:117] "RemoveContainer" containerID="8dbc699386128aa4e4af25beb0ea7e7ecad1b2d5e829061a04ff808054f050aa" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36672 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36672 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36672 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:43.704880 4602 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-4gclf_hosted-grafana(fe493f66-8d1f-4435-9208-0304fd499ee1)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-4gclf" podUID="fe493f66-8d1f-4435-9208-0304fd499ee1" +I0507 11:59:43.704288 4602 scope.go:117] "RemoveContainer" containerID="2773338620ccfb32536d17788865e6fd4c7de7250ab31a7922195ffc1387ee5f" +I0507 11:59:43.644447 4755 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-1-main-n2s16-1-1dd-97837cc3-lpds" status="Running" +E0507 11:59:43.643392 4592 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-vqkzm_hosted-grafana(d3742b42-2b35-4c32-8267-7cf79bbcb441)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-vqkzm" podUID="d3742b42-2b35-4c32-8267-7cf79bbcb441" +I0507 11:59:43.642869 4592 scope.go:117] "RemoveContainer" containerID="d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679" +I0507 11:59:43.642392 4592 kubelet.go:2498] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-vqkzm" +I0507 11:59:43.520806 4724 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-j5wp" status="Running" +I0507 11:59:43.503115 4773 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dqf8" status="Running" +2024-05-07T11:59:43.499167Z INFO TelemetryEventsCollector ExtHandler Collected 2 events for extension: Microsoft.Azure.Extensions.CustomScript +2024-05-07T11:59:43.484606Z INFO ExtHandler ExtHandler Downloading agent manifest +I0507 11:59:43.476936 4734 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vvgr" status="Running" +E0507 11:59:43.469105 3315 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"gcom-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/frontend-monitoring:6a8eb5a\\\"\"" pod="faro/update-usage-28487080-9sqzn" podUID="2cc85139-2f31-44ae-a308-3dc0df893592" +I0507 11:59:43.455842 4729 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-v5z4" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36670 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36670 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36670 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:43.362209 4601 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-85282-20418-1\\\"\"" pod="hosted-grafana/ephemeral1511182185282svenner-grafana-6f6b6f4d85-9xlcc" podUID="fee4a5b2-d22d-4d80-8041-8796a997679a" +I0507 11:59:43.321744 4731 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x24s" 
status="Running" +I0507 11:59:43.306213 4736 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-78d9" status="Running" +I0507 11:59:43.223958 4731 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x24s" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36668 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36668 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36668 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:43.157806 4724 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-t8zj" status="Running" +XMT: Solicit on eth0, interval 126130ms. +E0507 11:59:43.151914 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6testslow2-grafana-7b64f97bd7-t22zk_hosted-grafana(9890650a-e338-4648-be7a-bb7f9726aa46)\"" pod="hosted-grafana/k6testslow2-grafana-7b64f97bd7-t22zk" podUID="9890650a-e338-4648-be7a-bb7f9726aa46" +I0507 11:59:43.151045 4572 scope.go:117] "RemoveContainer" containerID="885a879aadfd9cb0665b3cf90d203d82dd9a1da0a75fe01d8b01f5cd49efd910" +I0507 11:59:43.119017 4736 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dhdn" status="Running" +I0507 11:59:43.091861 4740 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-1-main-n2s16-1-1dd-97837cc3-cfqk" status="Running" +I0507 11:59:43.048506 4590 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-5jwm" status="Running" +I0507 11:59:43.041723 4589 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="grafana-apps/bigquery-datasource-grafana-app-fast-7c94f74fd6-9n6t7" secret="" err="secret \"dockerhub\" not found" +I0507 11:59:42.939527 4773 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-1-main-n2s16-1-1dd-97837cc3-4t6k" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36657 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36657 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36657 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:42.925813 4733 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=prometheus pod=bryan-prometheus-0_bryan-prometheus(6dadfe71-eb19-4231-a96e-c64bb5499a1e)\"" pod="bryan-prometheus/bryan-prometheus-0" podUID="6dadfe71-eb19-4231-a96e-c64bb5499a1e" +I0507 11:59:42.925068 4733 scope.go:117] "RemoveContainer" containerID="f0f5ac8b5f4dba0a416c838dd7ccfa903bd1ca22e36ebc4d98a29b4e646063c6" +I0507 11:59:42.923268 4731 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7c5w" status="Running" +E0507 11:59:42.886785 4597 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.0.2\\\"\"" pod="hosted-grafana/johangrafana10-grafana-69c6449bbd-k2bgp" podUID="bb953c26-c201-4082-9b56-85ab12c1d0e1" +I0507 11:59:42.870523 4708 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-kqpq" status="Running" +2024-05-07T11:59:42.753652Z INFO TelemetryEventsCollector ExtHandler Collected 2 events for extension: Microsoft.Azure.Extensions.CustomScript +audit: type=1400 audit(1715083182.707:151): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36655 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36655 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36655 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36655 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:42.683728 4736 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-78d9" status="Running" +I0507 11:59:42.668601 4727 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-rr2n" status="Running" +E0507 11:59:42.540931 4590 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-85282-20418-1\\\"\"" pod="hosted-grafana/ephemeral1511182185282svenner-grafana-6944cbdfcc-64z2p" podUID="1abeccba-cc20-47a4-b55c-fff4b7decbe1" +I0507 11:59:42.473132 
4761 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-1-main-n2s16-1-1dd-97837cc3-84qv" status="Running" +I0507 11:59:42.461420 4733 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="grafana-apps/query-grafana-app-fast-7d6dfcc787-t626q" secret="" err="secret \"dockerhub\" not found" +I0507 11:59:42.452711 4610 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-perf-n2s8-0-1dd3-91689928-hl8m" status="Running" +audit: type=1400 audit(1715083182.427:150): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36645 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083182.427:149): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36645 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083182.427:148): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36645 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36645 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36645 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36645 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:42.390135 4726 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"support-agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=support-agent pod=support-agent-557dff8b77-sx6hb_support-agent(f7b72dbb-4f3a-45b1-88c0-62337a3e8d3d)\"" pod="support-agent/support-agent-557dff8b77-sx6hb" podUID="f7b72dbb-4f3a-45b1-88c0-62337a3e8d3d" +I0507 11:59:42.389722 4726 scope.go:117] "RemoveContainer" containerID="b35f99aea28d40fc317084351108a819285a8c62583dab13a9dc1a35e150715d" +E0507 11:59:42.363245 4601 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-l2ck2_hosted-grafana(aa7ee40a-4ed2-456b-86bc-a6b77d48fa82)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-l2ck2" podUID="aa7ee40a-4ed2-456b-86bc-a6b77d48fa82" +E0507 11:59:42.362999 4601 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.0.2\\\"\"" pod="hosted-grafana/johan6-grafana-85546bbbf5-xbkrj" podUID="a1ca81cd-1fd3-4f14-b6a5-a129930ba761" +I0507 11:59:42.361599 4601 scope.go:117] "RemoveContainer" containerID="3c87e154309d0cdeea32be3c09f3cac0965efff7f1775be65bfef8fbc925782d" +I0507 11:59:42.325378 4581 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-4hrn" status="Running" +audit: type=1400 audit(1715083182.207:147): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36643 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083182.207:146): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" 
pid=36643 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083182.207:145): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36643 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36643 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36643 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36643 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:42.183133 4578 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=oncalldev-grafana-7b88d9459-fvqtx_hosted-grafana(fc7753d0-4067-4626-b539-5fd27ded163b)\"" pod="hosted-grafana/oncalldev-grafana-7b88d9459-fvqtx" podUID="fc7753d0-4067-4626-b539-5fd27ded163b" +E0507 11:59:42.183013 4578 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-5chwh_hosted-grafana(d349a15d-9276-457b-8e62-7d35f1bf81c0)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-5chwh" podUID="d349a15d-9276-457b-8e62-7d35f1bf81c0" +I0507 11:59:42.182460 4578 scope.go:117] "RemoveContainer" containerID="633c86551db397ab1ff61c06ae2334cfd3d76e9152cd24012569a2671774acbb" +I0507 11:59:42.182336 4578 scope.go:117] "RemoveContainer" containerID="11b59ae6f9e2827841afd016742b48dc692f5756819d08f4cd123fba98fd732d" +I0507 11:59:42.044224 4729 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6vzp" status="Running" +E0507 11:59:42.042844 4589 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=ephemeral1180076306267marefr-grafana-5d979884b7-lnsq2_hosted-grafana(78337132-8e81-47cc-8772-51f6e72e7927)\"" pod="hosted-grafana/ephemeral1180076306267marefr-grafana-5d979884b7-lnsq2" podUID="78337132-8e81-47cc-8772-51f6e72e7927" +E0507 11:59:42.042747 4589 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-nj27g_hosted-grafana(212d6baa-7068-4ad2-9617-f67f010e866d)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-nj27g" podUID="212d6baa-7068-4ad2-9617-f67f010e866d" +I0507 11:59:42.042088 4589 scope.go:117] "RemoveContainer" containerID="efb5462666d496e154e0477e0540b5325157c76f784e16834d1ab78c4fce2815" +I0507 11:59:42.041815 4589 scope.go:117] "RemoveContainer" containerID="b22ba3d04cca58dd06227978cad02c4d11287b17e97aa2c9ed2a324204455fdf" +I0507 11:59:41.978947 4736 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lw2b" status="Running" +E0507 11:59:41.965110 4731 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"overrides-exporter\" with ImagePullBackOff: \"Back-off pulling image 
\\\"us.gcr.io/kubernetes-dev/enterprise-logs:callum-shard-firstlast-08\\\"\"" pod="loki-dev-010/overrides-exporter-98c77fd66-6zj6m" podUID="1ff5bf3e-5856-4f6f-ae04-273f2dee170b" +audit: type=1400 audit(1715083181.923:144): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36641 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083181.923:143): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36641 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083181.923:142): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36641 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +kauditd_printk_skb: 62 callbacks suppressed +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36641 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36641 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36641 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:41.844213 4732 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vpgr" status="Running" +I0507 11:59:41.844328 4639 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-lnkb" status="Running" +I0507 11:59:41.819406 4775 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-f2n6" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36639 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36639 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36639 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:41.619645 6247 prober.go:107] "Probe failed" probeType="Readiness" pod="grafana-agent/grafana-agent-helm-4" podUID="c36c5200-1cd6-4093-893c-c022f91af996" containerName="grafana-agent" probeResult="failure" output="Get \"http://10.0.99.125:3090/-/ready\": dial tcp 10.0.99.125:3090: connect: connection refused" +E0507 11:59:41.604606 4586 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6432-916-1\\\"\"" pod="hosted-grafana/ephemeral1180076306432stephan-grafana-696d787664-jftqh" podUID="41fba902-127b-4514-b1ca-ed431bc59a6c" +E0507 11:59:41.604446 4586 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=dev05devuseast0test-grafana-6cb68b9788-v8dgd_hosted-grafana(59ef7574-134f-4888-826e-9a22062f29f8)\"" pod="hosted-grafana/dev05devuseast0test-grafana-6cb68b9788-v8dgd" podUID="59ef7574-134f-4888-826e-9a22062f29f8" +I0507 11:59:41.603726 4586 scope.go:117] "RemoveContainer" 
containerID="c1992a17a0b5dc3d80080fcc1602d9481f2b4259ab708628828de7f34211f199" +E0507 11:59:41.601058 4624 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-8rbvv_hosted-grafana(55c96163-2915-466e-a1e7-38faf29e5f57)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-8rbvv" podUID="55c96163-2915-466e-a1e7-38faf29e5f57" +I0507 11:59:41.600163 4746 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-1-main-n2s16-1-1dd-97837cc3-sjzb" status="Running" +I0507 11:59:41.599190 4624 scope.go:117] "RemoveContainer" containerID="db587d00d587cb056ec6251bcea2fb39b19a5862db5d637d9980c88ea624c88c" +E0507 11:59:41.575311 4578 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=grafana pod=k6testslow3-grafana-dcb88d9f9-h74pm_hosted-grafana(01d5760f-8e5c-41f2-ac50-49eda7752498)\"" pod="hosted-grafana/k6testslow3-grafana-dcb88d9f9-h74pm" podUID="01d5760f-8e5c-41f2-ac50-49eda7752498" +I0507 11:59:41.574530 4578 scope.go:117] "RemoveContainer" containerID="bb168ccb972acd75ea8ada775d9453e1478cb408bdb2b1a9fdb660a1ae30a1a1" +E0507 11:59:41.538570 4590 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6teststeady6-grafana-67b679bd8c-l7knf_hosted-grafana(c5975fd5-22d7-4efb-a6b6-3064876188c1)\"" pod="hosted-grafana/k6teststeady6-grafana-67b679bd8c-l7knf" podUID="c5975fd5-22d7-4efb-a6b6-3064876188c1" +I0507 11:59:41.537978 4590 scope.go:117] "RemoveContainer" containerID="5b8aad8ab95e5f4201702424140d73f5cc582d6d48583a31ca0b0dabea27d806" +E0507 11:59:41.484796 4588 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-kkfm4_hosted-grafana(8d976e31-c3bb-489b-bb66-eb16be45f1e2)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-kkfm4" podUID="8d976e31-c3bb-489b-bb66-eb16be45f1e2" +I0507 11:59:41.484251 4588 scope.go:117] "RemoveContainer" containerID="b491b9e12e1ac8e16fd091f5dd23a3b892c443352b1a7f2952d49dd83c09b0d6" +I0507 11:59:41.481102 4644 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-kdh7" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36637 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36637 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36637 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:41.375710 4736 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ruler\" with CreateContainerConfigError: \"secret \\\"ruler-alertmanager-token\\\" not found\"" pod="ge-metrics-federation/gem-mimir-ruler-5f56f7846b-fgxdm" podUID="07c06e21-137b-4fdd-b7d3-703f0a567720" +E0507 11:59:41.375655 4736 kuberuntime_manager.go:1256] container 
&Container{Name:ruler,Image:grafana/enterprise-metrics:v2.12.0,Command:[],Args:[-target=ruler -config.expand-env=true -config.file=/etc/mimir/mimir.yaml -distributor.remote-timeout=10s],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:memberlist,HostPort:0,ContainerPort:7946,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:JAEGER_AGENT_HOST,Value:alloy-otlp.alloy-otlp.svc.cluster.local.,ValueFrom:nil,},EnvVar{Name:JAEGER_TAGS,Value:namespace=ge-metrics-federation,cluster=dev-us-central-0,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_MANAGER_HOST_PORT,Value:http://alloy-otlp.alloy-otlp.svc.cluster.local.:5778/sampling,ValueFrom:nil,},EnvVar{Name:GOOGLE_APPLICATION_CREDENTIALS,Value:/var/secrets/google/credentials.json,ValueFrom:nil,},EnvVar{Name:AM_TOKEN,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:ruler-alertmanager-token,},Key:token,Optional:nil,},},},EnvVar{Name:JAEGER_REPORTER_MAX_QUEUE_SIZE,Value:1000,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:gcs-credentials,ReadOnly:false,MountPath:/var/secrets/google/,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:license,ReadOnly:false,MountPath:/license,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:runtime-config,ReadOnly:false,MountPath:/var/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:storage,ReadOnly:false,MountPath:/data,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:active-queries,ReadOnly:false,MountPath:/active-query-tracker,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-jtnbs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{1 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod gem-mimir-ruler-5f56f7846b-fgxdm_ge-metrics-federation(07c06e21-137b-4fdd-b7d3-703f0a567720): CreateContainerConfigError: secret "ruler-alertmanager-token" not found +I0507 11:59:41.373523 4646 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-m75z" status="Running" +AVC apparmor="DENIED" operation="ptrace" 
profile="cri-containerd.apparmor.d" pid=36635 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36635 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36635 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +time="2024-05-07T11:59:41.183044974Z" level=info msg="RemoveContainer for \"8d94f2aa54fdb8f6e4ddfed0f7db95a8f719e0eeccbe440b54033c29c7138cca\" returns successfully" +E0507 11:59:41.179412 2776 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=dafdeveuwest2-grafana-7845d969b5-f8h5q_hosted-grafana(14ac9939-b36a-40d7-9ca9-a0367aab99d8)\"" pod="hosted-grafana/dafdeveuwest2-grafana-7845d969b5-f8h5q" podUID="14ac9939-b36a-40d7-9ca9-a0367aab99d8" +time="2024-05-07T11:59:41.179375449Z" level=info msg="RemoveContainer for \"8d94f2aa54fdb8f6e4ddfed0f7db95a8f719e0eeccbe440b54033c29c7138cca\"" +I0507 11:59:41.178454 2776 scope.go:117] "RemoveContainer" containerID="eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef" +I0507 11:59:41.178001 2776 scope.go:117] "RemoveContainer" containerID="8d94f2aa54fdb8f6e4ddfed0f7db95a8f719e0eeccbe440b54033c29c7138cca" +I0507 11:59:41.177954 2776 kubelet.go:2421] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/dafdeveuwest2-grafana-7845d969b5-f8h5q" event={"ID":"14ac9939-b36a-40d7-9ca9-a0367aab99d8","Type":"ContainerDied","Data":"eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef"} +I0507 11:59:41.177905 2776 generic.go:334] "Generic (PLEG): container finished" podID="14ac9939-b36a-40d7-9ca9-a0367aab99d8" containerID="eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef" exitCode=1 +E0507 11:59:41.152034 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6439-933-1\\\"\"" pod="hosted-grafana/ephemeral1180076306439dafyddt-grafana-9769b9f5-g5qqf" podUID="e6633496-a926-4a28-8db8-6405d33cb4bc" +E0507 11:59:41.151972 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-enterprise-6372-855-1\\\"\"" pod="hosted-grafana/ephemeral1180076306372jacobso-grafana-7f66f49b8d-kzhxd" podUID="7ac84154-783b-4672-b865-f728da592129" +E0507 11:59:41.042036 4589 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6432-916-1\\\"\"" pod="hosted-grafana/ephemeral1180076306432stephan-grafana-6486f498c4-79z9j" podUID="301e3aba-d59e-4699-9e89-41507660c707" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36619 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36619 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36619 comm="pidof" requested_mask="read" 
denied_mask="read" peer="unconfined" +E0507 11:59:40.886783 4597 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77487-8287-1\\\"\"" pod="hosted-grafana/ephemeral1511182177487torkelo-grafana-79dd77959f-2l2kd" podUID="4d3be4e9-d8c5-487f-a292-ecb699c3aaad" +E0507 11:59:40.886262 4597 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-84322-18859-1\\\"\"" pod="hosted-grafana/ephemeral1511182184322yuritce-grafana-c6bc4dcc7-zffvh" podUID="fd6e5a63-416f-4ec0-9372-99da41bdffe5" +I0507 11:59:40.813204 4731 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7c5w" status="Running" +E0507 11:59:40.787727 2776 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef not found: not found" probeType="Readiness" pod="hosted-grafana/dafdeveuwest2-grafana-7845d969b5-f8h5q" podUID="14ac9939-b36a-40d7-9ca9-a0367aab99d8" containerName="grafana" +E0507 11:59:40.787679 2776 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef not found: not found" containerID="eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef" cmd=["/bin/hgrun","check"] +time="2024-05-07T11:59:40.787470004Z" level=error msg="ExecSync for \"eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef not found: not found" +E0507 11:59:40.786721 2776 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef not found: not found" containerID="eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef" cmd=["/bin/hgrun","check"] +time="2024-05-07T11:59:40.786428397Z" level=error msg="ExecSync for \"eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef not found: not found" +time="2024-05-07T11:59:40.785339490Z" level=error msg="ExecSync for \"eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef not found: not found" +E0507 11:59:40.785696 2776 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef not found: not 
found" containerID="eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef" cmd=["/bin/hgrun","check"] + > +I0507 11:59:40.784002 2776 prober.go:107] "Probe failed" probeType="Readiness" pod="hosted-grafana/dafdeveuwest2-grafana-7845d969b5-f8h5q" podUID="14ac9939-b36a-40d7-9ca9-a0367aab99d8" containerName="grafana" probeResult="failure" output=< + ts=2024-05-07T11:59:40.579977004Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:40.165927901Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:40.095094601Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:39.968003625Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:39.859160404Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:39.73942929Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:39.597629189Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:39.513020473Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:39.430133771Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:39.357435956Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:39.309688652Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:39.237760243Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get 
\"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:39.214283344Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.452 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health +time="2024-05-07T11:59:40.783954281Z" level=warning msg="cleaning up after shim disconnected" id=eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef namespace=k8s.io +time="2024-05-07T11:59:40.783971781Z" level=info msg="cleaning up dead shim" namespace=k8s.io +time="2024-05-07T11:59:40.783846880Z" level=info msg="shim disconnected" id=eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef namespace=k8s.io +run-containerd-io.containerd.runtime.v2.task-k8s.io-eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef-rootfs.mount: Deactivated successfully. +I0507 11:59:40.768911 4618 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-kvt4" status="Running" +E0507 11:59:40.761832 3303 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cortex-gw\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=cortex-gw pod=cortex-gw-78bc9b5ccc-8hkmp_faro(44b54226-b4bd-46e0-a3f0-257cb44d9ea8)\"" pod="faro/cortex-gw-78bc9b5ccc-8hkmp" podUID="44b54226-b4bd-46e0-a3f0-257cb44d9ea8" +cri-containerd-eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef.scope: Deactivated successfully. +I0507 11:59:40.761377 3303 scope.go:117] "RemoveContainer" containerID="9f3955a57aa496cb888a35102ef0ee777d6a75cdc12addbdafc2d9b3fb9cc080" +E0507 11:59:40.722802 4732 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"support-agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=support-agent pod=support-agent-557dff8b77-c6f8b_support-agent(ede5a224-96fb-45d0-b452-1eb2de73cf19)\"" pod="support-agent/support-agent-557dff8b77-c6f8b" podUID="ede5a224-96fb-45d0-b452-1eb2de73cf19" +I0507 11:59:40.722255 4732 scope.go:117] "RemoveContainer" containerID="e0a235a59cc57d2dbbcab276b25c7bb1bab9cecc37697779748125072457736f" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36617 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36617 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36617 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:40.582478 4586 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-tjd7" status="Running" +I0507 11:59:40.574065 4647 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-86xp" status="Running" +E0507 11:59:40.570675 4595 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6testslow4-grafana-bc95d7c87-55jh7_hosted-grafana(69c825b9-ceee-41b6-861c-a9695a3b7771)\"" pod="hosted-grafana/k6testslow4-grafana-bc95d7c87-55jh7" 
podUID="69c825b9-ceee-41b6-861c-a9695a3b7771" +I0507 11:59:40.569595 4595 scope.go:117] "RemoveContainer" containerID="d72ee3acf4ba575ccbe6544c8fcfef41f73a5120459124bb518dbd9f589891e7" +I0507 11:59:40.566172 4647 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-gmgr" status="Running" +E0507 11:59:40.525566 4615 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\\\"\"" pod="hosted-grafana/ephemeral1180076306436hairyhe-grafana-6fb647ccf6-n9f7m" podUID="37586611-dc03-4f8b-8bce-80dadde5d571" +I0507 11:59:40.505981 4603 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-perf-n2s8-0-1dd3-91689928-mm2d" status="Running" +I0507 11:59:40.480336 4777 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dhh8" status="Running" +I0507 11:59:40.447535 4726 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-1-main-n2s16-1-1dd-97837cc3-5k89" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36599 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36599 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36599 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:40.363392 4601 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-80141-12952-1\\\"\"" pod="hosted-grafana/ephemeral1511182180141joshhun-grafana-7dbb6d4777-rzgds" podUID="c644591b-d4d7-4bef-913f-00b9f96539d4" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36597 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36597 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36597 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:40.183052 4578 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6433-919-1\\\"\"" pod="hosted-grafana/ephemeral1180076306433stephan-grafana-596f864fd8-wmfmg" podUID="49c2807d-900e-4029-804b-93c078a398f6" +E0507 11:59:40.087432 4730 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=agent pod=jaeger-agent-856f67c6d7-6xj9z_jaeger(1a240429-7c6f-4c4c-8c4e-d2579a6e737e)\"" pod="jaeger/jaeger-agent-856f67c6d7-6xj9z" podUID="1a240429-7c6f-4c4c-8c4e-d2579a6e737e" +I0507 11:59:40.087072 4730 scope.go:117] "RemoveContainer" containerID="7518b1ab8a429bc205618da216992759f7c641bb6a7feea31daa6fc52170f1db" +I0507 11:59:40.005475 4734 kubelet_getters.go:187] "Pod 
status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-t2kf" status="Running" +I0507 11:59:39.995966 4726 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7h6b" status="Running" +E0507 11:59:39.925282 4733 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pdc\" with ErrImageNeverPull: \"Container image \\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\" is not present with pull policy of Never\"" pod="pdc/private-datasource-connect-564fb6cfbb-l8pgv" podUID="57e4a0cb-5e77-47bd-b277-70f4b1512c44" +E0507 11:59:39.925221 4733 kuberuntime_manager.go:1256] container &Container{Name:pdc,Image:us.gcr.io/hosted-grafana/pdc:0.1.415,Command:[],Args:[-proxy.auth.ca-keys-dir=/var/run/secrets/pdc-certs -proxy.socks-server.addr=:10443 -proxy.ssh-server.addr=:2222 -proxy.use-socks-username-for-routing -proxy.api.http-address=:9182 -proxy.check-connpool-address-in-ring -memberlist.join=dns+gossip-ring.pdc.svc.cluster.local:7946 -api.http-address=:11443 -distributor.enabled=true -distributor.addr=:10444 -distributor.use-socks-username-for-routing -gateway.enabled=true -gateway.addr=:2244 -log.level=debug -certs.ca-private-key-file=/var/run/secrets/pdc-certs/ca.key -certs.ca-cert-file=/var/run/secrets/pdc-certs/ca.crt -certs.ca-pub-file=/var/run/secrets/pdc-certs/ca.pub -certs.cluster=local-k8s -shard-size=3 -graceful-shutdown-period=30s -enable-multiple-networks],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:socks,HostPort:0,ContainerPort:10443,Protocol:TCP,HostIP:,},ContainerPort{Name:ssh,HostPort:0,ContainerPort:2222,Protocol:TCP,HostIP:,},ContainerPort{Name:distributor,HostPort:0,ContainerPort:10444,Protocol:TCP,HostIP:,},ContainerPort{Name:gateway,HostPort:0,ContainerPort:2244,Protocol:TCP,HostIP:,},ContainerPort{Name:api,HostPort:0,ContainerPort:11443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{250 -3} {} 250m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:pdc-certs,ReadOnly:true,MountPath:/var/run/secrets/pdc-certs,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-wcbmb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{0 11443 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:40,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/sleep 5],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Never,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod private-datasource-connect-564fb6cfbb-l8pgv_pdc(57e4a0cb-5e77-47bd-b277-70f4b1512c44): 
ErrImageNeverPull: Container image "us.gcr.io/hosted-grafana/pdc:0.1.415" is not present with pull policy of Never +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36593 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36593 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36593 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:39.848842 4597 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-4jtn" status="Running" +E0507 11:59:39.831408 4591 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.0.2\\\"\"" pod="hosted-grafana/grafana10-grafana-78d6685854-sx786" podUID="c6bea181-911d-41f1-9008-c07eb94d5d9d" +E0507 11:59:39.706381 4602 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6testslow4-grafana-bc95d7c87-b596x_hosted-grafana(3eeb0d67-7e96-4a25-bde3-99dd64c0d7d1)\"" pod="hosted-grafana/k6testslow4-grafana-bc95d7c87-b596x" podUID="3eeb0d67-7e96-4a25-bde3-99dd64c0d7d1" +I0507 11:59:39.705664 4602 scope.go:117] "RemoveContainer" containerID="f127301dfd17ffb654fd37d23c3b47994165737a57d5e1ccbc5169cec18da4d7" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36591 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36591 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36591 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:39.690250 4733 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ruler\" with CreateContainerConfigError: \"secret \\\"ruler-alertmanager-token\\\" not found\"" pod="ge-metrics-federation/gem-mimir-ruler-8c54cd69f-27jpq" podUID="0a159d8c-5540-44c2-a592-f43db7a1aae6" +E0507 11:59:39.690197 4733 kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.11.1,Command:[],Args:[-target=ruler -config.expand-env=true 
-config.file=/etc/mimir/mimir.yaml],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:memberlist,HostPort:0,ContainerPort:7946,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:JAEGER_AGENT_HOST,Value:alloy-otlp.alloy-otlp.svc.cluster.local.,ValueFrom:nil,},EnvVar{Name:JAEGER_TAGS,Value:namespace=ge-metrics-federation,cluster=dev-us-central-0,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_MANAGER_HOST_PORT,Value:http://alloy-otlp.alloy-otlp.svc.cluster.local.:5778/sampling,ValueFrom:nil,},EnvVar{Name:GOOGLE_APPLICATION_CREDENTIALS,Value:/var/secrets/google/credentials.json,ValueFrom:nil,},EnvVar{Name:AM_TOKEN,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:ruler-alertmanager-token,},Key:token,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:gcs-credentials,ReadOnly:false,MountPath:/var/secrets/google/,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:license,ReadOnly:false,MountPath:/license,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:runtime-config,ReadOnly:false,MountPath:/var/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:storage,ReadOnly:false,MountPath:/data,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:active-queries,ReadOnly:false,MountPath:/active-query-tracker,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-qp8rd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{1 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod gem-mimir-ruler-8c54cd69f-27jpq_ge-metrics-federation(0a159d8c-5540-44c2-a592-f43db7a1aae6): CreateContainerConfigError: secret "ruler-alertmanager-token" not found +I0507 11:59:39.611540 4773 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vf2b" status="Running" +I0507 11:59:39.571623 4737 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-mwjl" status="Running" +E0507 11:59:39.570301 4595 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6testslow1-grafana-54499558d4-fvfn2_hosted-grafana(d9130e10-120d-4288-9a2a-27a11c3d5fb5)\"" pod="hosted-grafana/k6testslow1-grafana-54499558d4-fvfn2" podUID="d9130e10-120d-4288-9a2a-27a11c3d5fb5" +I0507 11:59:39.569666 4595 scope.go:117] "RemoveContainer" containerID="172037953598baa976c96c3b22a935cb88d55ed99211307c20c2f5a0ca7049ef" +I0507 11:59:39.560605 4739 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="logs-endpoint-dev-005/kafka-exporter-766c6757b5-bggf6" secret="" err="secret \"not-needed\" not found" +I0507 11:59:39.431762 4589 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="kafka/kafka-controller-2" secret="" err="secret \"gcr\" not found" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36589 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36589 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36589 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:39.397972 4737 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-cndv" status="Running" +E0507 11:59:39.370058 4601 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6433-919-1\\\"\"" pod="hosted-grafana/ephemeral1180076306433stephan-grafana-6b47bdf747-5vlqc" podUID="1ad10dd5-1033-44c1-a8f0-3bd5f04f9396" +E0507 11:59:39.370037 4601 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.0.2\\\"\"" pod="hosted-grafana/johangrafana10-grafana-6b6b6954cc-gst92" podUID="c9a01896-0ab8-496e-b51d-a9c542f87965" +I0507 11:59:39.216314 4737 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6bbf" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36587 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36587 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36587 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +time="2024-05-07T11:59:39.180955622Z" level=info msg="RemoveContainer for \"011391a46c9e71c4dd0e38155b5439bf8dfd38d967f29995d1856e8688423c3f\" returns successfully" +E0507 11:59:39.176592 4631 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-7q7fd_hosted-grafana(5f7c88ef-55b9-4d72-8a2f-c9f2fa39f273)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-7q7fd" podUID="5f7c88ef-55b9-4d72-8a2f-c9f2fa39f273" +time="2024-05-07T11:59:39.175742342Z" level=info msg="RemoveContainer for 
\"011391a46c9e71c4dd0e38155b5439bf8dfd38d967f29995d1856e8688423c3f\"" +I0507 11:59:39.171822 4631 scope.go:117] "RemoveContainer" containerID="6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408" +I0507 11:59:39.171341 4631 scope.go:117] "RemoveContainer" containerID="011391a46c9e71c4dd0e38155b5439bf8dfd38d967f29995d1856e8688423c3f" +I0507 11:59:39.171313 4631 kubelet.go:2426] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-7q7fd" event={"ID":"5f7c88ef-55b9-4d72-8a2f-c9f2fa39f273","Type":"ContainerDied","Data":"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408"} +I0507 11:59:39.171274 4631 generic.go:334] "Generic (PLEG): container finished" podID="5f7c88ef-55b9-4d72-8a2f-c9f2fa39f273" containerID="6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408" exitCode=1 +I0507 11:59:39.168633 2776 kubelet.go:2493] "SyncLoop (probe)" probe="readiness" status="" pod="hosted-grafana/dafdeveuwest2-grafana-7845d969b5-f8h5q" +I0507 11:59:39.168215 2776 kubelet.go:2421] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/dafdeveuwest2-grafana-7845d969b5-f8h5q" event={"ID":"14ac9939-b36a-40d7-9ca9-a0367aab99d8","Type":"ContainerStarted","Data":"eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef"} +E0507 11:59:39.152534 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\\\"\"" pod="hosted-grafana/ephemeral1511182177506ashharr-grafana-9446fd844-7nzkd" podUID="cc369230-6dd4-43de-8a99-b900823fc053" +E0507 11:59:39.152408 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6testslow5-grafana-6b4464c649-qgbsd_hosted-grafana(7576d413-e89a-47d5-84e3-697803ff3819)\"" pod="hosted-grafana/k6testslow5-grafana-6b4464c649-qgbsd" podUID="7576d413-e89a-47d5-84e3-697803ff3819" +E0507 11:59:39.152231 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=legacyalerting-grafana-767589fc8d-6nm2t_hosted-grafana(2ddc4db1-70a6-41fe-a272-b5986a846e3a)\"" pod="hosted-grafana/legacyalerting-grafana-767589fc8d-6nm2t" podUID="2ddc4db1-70a6-41fe-a272-b5986a846e3a" +I0507 11:59:39.151757 4572 scope.go:117] "RemoveContainer" containerID="2e88c55b76390a7f9f0383f808e012173d5e761d2f50663f05e595d53a1605f9" +I0507 11:59:39.151671 4572 scope.go:117] "RemoveContainer" containerID="321219eadc6cdc30f7a3e6d9c9adc1f5852d72f80bdc13cb1870d2ef24d6a8db" +E0507 11:59:39.149450 4729 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cluster-agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=cluster-agent pod=appdynamics-cluster-agent-appdynamics-cluster-agent-56667dmbnkv_integration(69bc5e6c-0451-443e-af8a-c831871afbb8)\"" pod="integration/appdynamics-cluster-agent-appdynamics-cluster-agent-56667dmbnkv" podUID="69bc5e6c-0451-443e-af8a-c831871afbb8" +I0507 11:59:39.148673 4729 scope.go:117] "RemoveContainer" containerID="e887493c6e0eba98c77d48f6440bced72b79684e24aeef0a09a1cdebd9dbe85e" +E0507 11:59:39.044392 4589 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=grafana pod=k6testslow2-grafana-6f8cdc574c-vkwn9_hosted-grafana(25bd0e02-eade-4034-93b0-1b212199e949)\"" pod="hosted-grafana/k6testslow2-grafana-6f8cdc574c-vkwn9" podUID="25bd0e02-eade-4034-93b0-1b212199e949" +I0507 11:59:39.043426 4589 scope.go:117] "RemoveContainer" containerID="b50868147571f34e653fc07591105b455a08c4658b575711ffcd9749591a40be" +I0507 11:59:38.994979 4768 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-9jbm" status="Running" +I0507 11:59:38.950175 4732 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-462z" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36576 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36576 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36576 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:38.801070 4592 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-lqg5" status="Running" +E0507 11:59:38.778703 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=agent pod=jaeger-agent-856f67c6d7-tcsmd_jaeger(9121c1a3-6d79-4411-be8e-41406c88944a)\"" pod="jaeger/jaeger-agent-856f67c6d7-tcsmd" podUID="9121c1a3-6d79-4411-be8e-41406c88944a" +I0507 11:59:38.778026 4734 scope.go:117] "RemoveContainer" containerID="11f6b0c150af531670af31444292720785e10c6fbe5a00b7ef85dd062ac22ffe" +E0507 11:59:38.773774 4625 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-kzlv4_hosted-grafana(db10d4d9-7c00-4757-9bca-fa2c735f6595)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-kzlv4" podUID="db10d4d9-7c00-4757-9bca-fa2c735f6595" +I0507 11:59:38.773181 4625 scope.go:117] "RemoveContainer" containerID="32f138fccb479a3484193f15220d1d6bef16a750071446fd9a6b1288c31d8d83" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68547 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68547 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68547 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083178.748:507): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68546 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083178.748:506): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68545 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083178.748:505): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68545 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 
audit(1715083178.748:504): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68545 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083178.748:503): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68543 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083178.748:502): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68543 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083178.748:501): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68543 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68546 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68546 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68546 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68545 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68545 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68545 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68543 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68543 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68543 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083178.744:500): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68542 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083178.744:499): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68542 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083178.744:498): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68542 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +kauditd_printk_skb: 5 callbacks suppressed +time="2024-05-07T11:59:38.748149797Z" level=info msg="StartContainer for \"eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef\" returns successfully" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68542 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68542 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=68542 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:38.738752 4777 
kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-z6f6" status="Running" +I0507 11:59:38.725225 4744 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-1-main-n2s16-1-1dd-97837cc3-xhp4" status="Running" +Started libcontainer container eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef. +E0507 11:59:38.704674 4602 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-80141-12952-1\\\"\"" pod="hosted-grafana/ephemeral1511182180141joshhun-grafana-744f5cfd67-6w9jb" podUID="1e2a9789-01a5-4ab0-97d4-06dee391f43f" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36574 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36574 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36574 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +time="2024-05-07T11:59:38.694650245Z" level=info msg="StartContainer for \"eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef\"" +var-lib-containerd-tmpmounts-containerd\x2dmount777551232.mount: Deactivated successfully. +time="2024-05-07T11:59:38.693858938Z" level=info msg="CreateContainer within sandbox \"10f321d50b51389b7845c1c5393aff42e209a45fcee6fec6d74c8b60dd5266ad\" for &ContainerMetadata{Name:grafana,Attempt:7,} returns container id \"eeccb21da13bfae40b1a01984522c7a8f8dcb65dba3cc1cc2f2ba73381d9eaef\"" +time="2024-05-07T11:59:38.671307647Z" level=info msg="CreateContainer within sandbox \"10f321d50b51389b7845c1c5393aff42e209a45fcee6fec6d74c8b60dd5266ad\" for container &ContainerMetadata{Name:grafana,Attempt:7,}" +I0507 11:59:38.667270 2776 scope.go:117] "RemoveContainer" containerID="8d94f2aa54fdb8f6e4ddfed0f7db95a8f719e0eeccbe440b54033c29c7138cca" +I0507 11:59:38.638688 4776 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-qmgr" status="Running" +I0507 11:59:38.539722 4771 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7lvz" status="Running" +E0507 11:59:38.539381 4590 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6433-919-1\\\"\"" pod="hosted-grafana/ephemeral1180076306433stephan-grafana-74cf7bb5b7-qmfgj" podUID="0b654534-e87f-4660-a634-3aa23906f24d" +I0507 11:59:38.497645 4735 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-trgv" status="Running" +E0507 11:59:38.489629 4631 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" probeType="Readiness" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-7q7fd" podUID="5f7c88ef-55b9-4d72-8a2f-c9f2fa39f273" containerName="grafana" +E0507 11:59:38.489587 4631 remote_runtime.go:496] "ExecSync cmd from 
runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" containerID="6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408" cmd=["/bin/hgrun","check"] +time="2024-05-07T11:59:38.489392061Z" level=error msg="ExecSync for \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" +E0507 11:59:38.488880 4631 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" containerID="6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408" cmd=["/bin/hgrun","check"] +time="2024-05-07T11:59:38.488693040Z" level=error msg="ExecSync for \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" +E0507 11:59:38.488155 4631 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" containerID="6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408" cmd=["/bin/hgrun","check"] +time="2024-05-07T11:59:38.487998369Z" level=error msg="ExecSync for \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" +E0507 11:59:38.487509 4631 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" probeType="Readiness" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-7q7fd" podUID="5f7c88ef-55b9-4d72-8a2f-c9f2fa39f273" containerName="grafana" +E0507 11:59:38.487460 4631 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" containerID="6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408" cmd=["/bin/hgrun","check"] +time="2024-05-07T11:59:38.487245276Z" level=error msg="ExecSync for \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" +E0507 11:59:38.486679 4631 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load 
task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" containerID="6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408" cmd=["/bin/hgrun","check"] +time="2024-05-07T11:59:38.486488188Z" level=error msg="ExecSync for \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" +time="2024-05-07T11:59:38.485721949Z" level=error msg="ExecSync for \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" + > +E0507 11:59:38.485960 4631 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task 6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 not found: not found" containerID="6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408" cmd=["/bin/hgrun","check"] + ts=2024-05-07T11:59:32.025687537Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request + ts=2024-05-07T11:59:31.47288066Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:30.25205688Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:29.489758832Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:28.89372222Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:27.549765307Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:27.111040682Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:25.771680727Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:25.398875396Z level=error 
caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:24.719782935Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:24.258151357Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:23.238736396Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:22.763876203Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:22.337877023Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:21.976968587Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:21.412826453Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:20.542485218Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health +I0507 11:59:38.485111 4631 prober.go:107] "Probe failed" probeType="Readiness" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-7q7fd" podUID="5f7c88ef-55b9-4d72-8a2f-c9f2fa39f273" containerName="grafana" probeResult="failure" output=< +E0507 11:59:38.485042 4631 prober.go:239] "Unable to write all bytes from execInContainer" err="short write" expectedBytes=11555 actualBytes=10240 + ts=2024-05-07T11:59:20.133570834Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:19.968957316Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + 
ts=2024-05-07T11:59:19.615197163Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:19.34348473Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:18.888685553Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:18.342780761Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:17.847204341Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:17.651565636Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:17.380926872Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:17.139181882Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:16.971537625Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:16.843946945Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:16.798396868Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:16.588703925Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:16.524179416Z level=error 
caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:16.399564514Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:16.274970494Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:16.185392265Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:16.167132483Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health +time="2024-05-07T11:59:38.484586527Z" level=error msg="Failed to delete exec process \"d9e0a1867ce73695ad859f2b0a76fe8f5053db8a5e49142d747e53a445729bd4\" for container \"6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408\"" error="ttrpc: closed: unknown" +time="2024-05-07T11:59:38.484606284Z" level=warning msg="cleaning up after shim disconnected" id=6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 namespace=k8s.io +time="2024-05-07T11:59:38.484616161Z" level=info msg="cleaning up dead shim" namespace=k8s.io +time="2024-05-07T11:59:38.484530585Z" level=info msg="shim disconnected" id=6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408 namespace=k8s.io +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36564 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36564 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36564 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:38.411649 4775 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-t94m" status="Running" +time="2024-05-07T11:59:38.385332298Z" level=info msg="RemoveContainer for \"f4fe841b1c27d602f09fc97c821d9cbc9654349f07afc532685d1a6a2ed887ed\" returns successfully" +E0507 11:59:38.377710 4589 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6teststeady3-grafana-659d5ff58d-h4lmj_hosted-grafana(85274c17-190e-4275-a8f3-6e111cd833bf)\"" pod="hosted-grafana/k6teststeady3-grafana-659d5ff58d-h4lmj" podUID="85274c17-190e-4275-a8f3-6e111cd833bf" +time="2024-05-07T11:59:38.377259687Z" level=info msg="RemoveContainer for \"f4fe841b1c27d602f09fc97c821d9cbc9654349f07afc532685d1a6a2ed887ed\"" +I0507 11:59:38.376714 4589 scope.go:117] "RemoveContainer" 
containerID="fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a" +I0507 11:59:38.375936 4589 scope.go:117] "RemoveContainer" containerID="f4fe841b1c27d602f09fc97c821d9cbc9654349f07afc532685d1a6a2ed887ed" +I0507 11:59:38.375903 4589 kubelet.go:2426] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/k6teststeady3-grafana-659d5ff58d-h4lmj" event={"ID":"85274c17-190e-4275-a8f3-6e111cd833bf","Type":"ContainerDied","Data":"fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a"} +I0507 11:59:38.375858 4589 generic.go:334] "Generic (PLEG): container finished" podID="85274c17-190e-4275-a8f3-6e111cd833bf" containerID="fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a" exitCode=1 +I0507 11:59:38.242879 4603 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-perf-n2s8-0-1dd3-91689928-9f87" status="Running" +I0507 11:59:38.197379 2791 kubelet.go:2421] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/dafdeveuwest2-grafana-546fbd789d-czx47" event={"ID":"fc6ba4ea-9950-4999-8ad2-bdc9a577fb34","Type":"ContainerStarted","Data":"4346b5c7d89f0d5b24a4b190740ed7327d12365cbd86dd55f845a8416cb1824e"} +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36561 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36561 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36561 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:38.195145 2791 kubelet.go:2421] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/victor-grafana-7b7bb568cc-grflq" event={"ID":"1803645b-5526-41b4-bf88-271be4827277","Type":"ContainerStarted","Data":"a63e882bf89caa0e0f3027fc2c068801b36e5b3386b4d4f2570cb4f4d77298f4"} +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36559 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36559 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36559 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36558 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36558 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36558 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:38.183090 4578 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=grafanawargame1-grafana-5cb77d49cd-kxj2w_hosted-grafana(ce1d40ca-a97a-4e10-b362-dae978b1723f)\"" pod="hosted-grafana/grafanawargame1-grafana-5cb77d49cd-kxj2w" podUID="ce1d40ca-a97a-4e10-b362-dae978b1723f" +I0507 11:59:38.182549 4578 scope.go:117] "RemoveContainer" containerID="a30ce77f9f4670e75bd7ceb718e65c4a191d953fdfecc0e5c25086f212bd0a33" +time="2024-05-07T11:59:38.178929893Z" level=info msg="StartContainer for 
\"a63e882bf89caa0e0f3027fc2c068801b36e5b3386b4d4f2570cb4f4d77298f4\" returns successfully" +Started libcontainer container a63e882bf89caa0e0f3027fc2c068801b36e5b3386b4d4f2570cb4f4d77298f4. +E0507 11:59:38.152209 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6testslow1-grafana-5bf579b6cc-cqp4w_hosted-grafana(e56accb0-e2cc-41ba-b7c8-91d0ab3c1b8d)\"" pod="hosted-grafana/k6testslow1-grafana-5bf579b6cc-cqp4w" podUID="e56accb0-e2cc-41ba-b7c8-91d0ab3c1b8d" +I0507 11:59:38.151368 4572 scope.go:117] "RemoveContainer" containerID="0e8924bd8e1c9018a410b5b69f9ad729c09b60fa5e4257f17783ab7bbb5f949a" +I0507 11:59:38.131862 4738 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-j94k" status="Running" +time="2024-05-07T11:59:38.129348185Z" level=info msg="StartContainer for \"a63e882bf89caa0e0f3027fc2c068801b36e5b3386b4d4f2570cb4f4d77298f4\"" +time="2024-05-07T11:59:38.128540975Z" level=info msg="CreateContainer within sandbox \"346dc5e0c503a6ca1f0281e3b6f8e32563ebf8a61ed467b4d0dc5b4030a115b9\" for &ContainerMetadata{Name:hgrun,Attempt:0,} returns container id \"a63e882bf89caa0e0f3027fc2c068801b36e5b3386b4d4f2570cb4f4d77298f4\"" +time="2024-05-07T11:59:38.118600653Z" level=info msg="CreateContainer within sandbox \"346dc5e0c503a6ca1f0281e3b6f8e32563ebf8a61ed467b4d0dc5b4030a115b9\" for container &ContainerMetadata{Name:hgrun,Attempt:0,}" +time="2024-05-07T11:59:38.117772842Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hg-plugins:2024-05-07-v545244-f51851984\"" +I0507 11:59:38.116658 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hg-plugins) is not from ACR, return empty authentication +time="2024-05-07T11:59:38.116099322Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hgrun:0.1.452\" returns image reference \"sha256:9fb1bce3e4a228f50768d21842cd7d7fafc1d586eaa0326c9d3c86d79a36868a\"" +time="2024-05-07T11:59:38.116062821Z" level=info msg="Pulled image \"us.gcr.io/hosted-grafana/hgrun:0.1.452\" with image id \"sha256:9fb1bce3e4a228f50768d21842cd7d7fafc1d586eaa0326c9d3c86d79a36868a\", repo tag \"us.gcr.io/hosted-grafana/hgrun:0.1.452\", repo digest \"us.gcr.io/hosted-grafana/hgrun@sha256:b492dbbbee9faf9dba63c9fd89e6f9e148239765454c6a54c4284a2828dec153\", size \"19109699\" in 781.210092ms" +time="2024-05-07T11:59:38.115073809Z" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/hgrun@sha256:b492dbbbee9faf9dba63c9fd89e6f9e148239765454c6a54c4284a2828dec153\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:38.113482790Z" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/hgrun:0.1.452\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:38.111878670Z" level=info msg="ImageUpdate event name:\"sha256:9fb1bce3e4a228f50768d21842cd7d7fafc1d586eaa0326c9d3c86d79a36868a\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:38.111105661Z" level=info msg="stop pulling image us.gcr.io/hosted-grafana/hgrun:0.1.452: active requests=0, bytes read=6766" +time="2024-05-07T11:59:38.110778357Z" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/hgrun:0.1.452\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +I0507 11:59:38.092172 4527 kubelet.go:2426] "SyncLoop (PLEG): event for pod" 
pod="otel-demo/otel-demo-dev-checkoutservice-6ddf9b978b-zqrsr" event={"ID":"f263b787-926e-459a-95a0-f9ef8e4e9bc2","Type":"ContainerStarted","Data":"95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9"} +E0507 11:59:38.083847 4589 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a not found: not found" probeType="Readiness" pod="hosted-grafana/k6teststeady3-grafana-659d5ff58d-h4lmj" podUID="85274c17-190e-4275-a8f3-6e111cd833bf" containerName="grafana" +E0507 11:59:38.083788 4589 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a not found: not found" containerID="fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a" cmd=["/bin/hgrun","check"] +time="2024-05-07T11:59:38.083577708Z" level=error msg="ExecSync for \"fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a not found: not found" +E0507 11:59:38.083085 4589 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a not found: not found" containerID="fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a" cmd=["/bin/hgrun","check"] +time="2024-05-07T11:59:38.082843516Z" level=error msg="ExecSync for \"fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a not found: not found" +time="2024-05-07T11:59:38.081886661Z" level=error msg="ExecSync for \"fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a not found: not found" + > +E0507 11:59:38.082179 4589 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a not found: not found" containerID="fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a" cmd=["/bin/hgrun","check"] + ts=2024-05-07T11:59:37.257054006Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:36.616408464Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:35.943353082Z level=error 
caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:35.651524459Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:34.604654159Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:33.75455853Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health +I0507 11:59:38.081101 4589 prober.go:107] "Probe failed" probeType="Readiness" pod="hosted-grafana/k6teststeady3-grafana-659d5ff58d-h4lmj" podUID="85274c17-190e-4275-a8f3-6e111cd833bf" containerName="grafana" probeResult="failure" output=< + ts=2024-05-07T11:59:32.904157283Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:32.40247514Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:32.084670264Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:31.541769769Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:31.195900943Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:30.90418468Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:30.441210172Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:30.279567335Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get 
\"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:30.151869927Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:29.762204853Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:29.416349977Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:29.317686715Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:29.076543925Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:28.879830344Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:28.760910211Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:28.624184207Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:28.464985015Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:28.414503386Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:28.338847511Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:28.318295189Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-62-g2605e8595 msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: 
connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health +time="2024-05-07T11:59:38.080784392Z" level=warning msg="cleaning up after shim disconnected" id=fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a namespace=k8s.io +time="2024-05-07T11:59:38.080799009Z" level=info msg="cleaning up dead shim" namespace=k8s.io +time="2024-05-07T11:59:38.080699151Z" level=info msg="shim disconnected" id=fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a namespace=k8s.io +time="2024-05-07T11:59:38.080668278Z" level=info msg="StartContainer for \"95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9\" returns successfully" +I0507 11:59:37.961573 4624 kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" pod="hosted-grafana/ephemeral1511182180263lucyche-grafana-575d4b9448-r7sp7" +E0507 11:59:37.960437 4624 prober.go:239] "Unable to write all bytes from execInContainer" err="short write" expectedBytes=11843 actualBytes=10240 +I0507 11:59:37.915108 4726 prober.go:107] "Probe failed" probeType="Readiness" pod="agent-management-dev-002/agent-management-api-7ff7b9b9-k9nft" podUID="9893f9ac-f3e4-41fb-8da7-592061d2386c" containerName="agent-management-api" probeResult="failure" output="HTTP probe failed with statuscode: 400" +I0507 11:59:37.913494 4727 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-h9bx" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36499 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36499 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36499 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +run-containerd-io.containerd.runtime.v2.task-k8s.io-fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a-rootfs.mount: Deactivated successfully. +cri-containerd-fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a.scope: Consumed 16.023s CPU time. +cri-containerd-fc7a558bca122d6b5fb9aa81e62a87053c8a6a84945fd7a5fd4508d7cbc0878a.scope: Deactivated successfully. +I0507 11:59:37.726128 3089 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="kafka/kafka-broker-1" secret="" err="secret \"gcr\" not found" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36497 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36497 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36497 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:37.667321 2776 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=victor-grafana-5f4c7cbdf7-bwfdl_hosted-grafana(525f5872-d605-4bd1-93b5-2ac890004c88)\"" pod="hosted-grafana/victor-grafana-5f4c7cbdf7-bwfdl" podUID="525f5872-d605-4bd1-93b5-2ac890004c88" +I0507 11:59:37.666642 2776 scope.go:117] "RemoveContainer" containerID="34da1ae22805bba50bdf08001da7492e265427af617701a829edfe1ca14fc152" +I0507 11:59:37.628819 4775 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xzrv" status="Running" +E0507 11:59:37.604573 4586 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\\\"\"" pod="hosted-grafana/ephemeral1180076306436hairyhe-grafana-58c766bdfb-87bmx" podUID="93917033-fece-4ffc-b04e-5dbdfc534657" +E0507 11:59:37.574330 4578 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-4kt2b_hosted-grafana(4fcb02de-77ce-4324-b879-5ad84d1ba5a4)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-4kt2b" podUID="4fcb02de-77ce-4324-b879-5ad84d1ba5a4" +I0507 11:59:37.573819 4578 scope.go:117] "RemoveContainer" containerID="7ecf9a69091f8fbee2555f938ad0737da2887c0b0188bbcea8c5736e8fa628ff" +E0507 11:59:37.570564 4595 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=pyroscopecanaries-grafana-5fb895ff5c-qg8fq_hosted-grafana(6c11e383-592a-4250-9bea-ac8003a507b9)\"" pod="hosted-grafana/pyroscopecanaries-grafana-5fb895ff5c-qg8fq" podUID="6c11e383-592a-4250-9bea-ac8003a507b9" +E0507 11:59:37.570104 4595 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-759c97b668-s8bxs_hosted-grafana(bfa4c7a9-f32e-4075-b044-efc1342b6234)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-759c97b668-s8bxs" podUID="bfa4c7a9-f32e-4075-b044-efc1342b6234" +I0507 11:59:37.569718 4595 scope.go:117] "RemoveContainer" containerID="2bcb4df11d62f3277600ff9d5680d84674ea998b36e762750e2fa214f96ec43a" +I0507 11:59:37.569430 4595 scope.go:117] "RemoveContainer" containerID="c4b8dcfc6d5278f921fd4d84b7cb58190be6f914af5f93fb9f306bd4714d7664" +E0507 11:59:37.526480 4615 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana 
pod=dev05devuseast0test-grafana-74755ff84c-8glnl_hosted-grafana(fa354582-d6f8-49fe-b75f-7b27131dc41f)\"" pod="hosted-grafana/dev05devuseast0test-grafana-74755ff84c-8glnl" podUID="fa354582-d6f8-49fe-b75f-7b27131dc41f" +I0507 11:59:37.525021 4615 scope.go:117] "RemoveContainer" containerID="37e8f7a22bb264a186c54dd50c0abbdf94f56c024a357e26a462160835aa224e" +E0507 11:59:37.414253 4589 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ErrImagePull: \"[rpc error: code = NotFound desc = failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\\\": failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\\\": us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2: not found, failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\\\": failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\\\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/10.1.0-ephemeral-oss-77506-8314-2: 403 Forbidden]\"" pod="hosted-grafana/ephemeral1511182177506ashharr-grafana-7666b574d5-22xlb" podUID="dd9f7420-7d07-4def-b1b1-5f1d0a8a7ca8" + while [ "$(pidof plugins-pause)" = "" ]; do sleep 0.5; done; + ln --force -s /proc/$(pidof hgrun-pause)/root/bin/hgrun /bin/hgrun; +E0507 11:59:37.414181 4589 kuberuntime_manager.go:1256] container &Container{Name:grafana,Image:us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2,Command:[/bin/sh],Args:[-c set -e; while [ "$(pidof hgrun-pause)" = "" ]; do sleep 0.5; done; + exec /bin/hgrun -log.level=debug launch -bundledPluginsManifest /proc/$(pidof plugins-pause)/root/manifest.json -bundledPluginsDir /proc/$(pidof plugins-pause)/root/plugins],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:80,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:10000,Protocol:TCP,HostIP:,},ContainerPort{Name:profiling,HostPort:0,ContainerPort:6060,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:HG_API,Value:http://hosted-grafana-api,ValueFrom:nil,},EnvVar{Name:HG_INSTANCE_SLUG,Value:ephemeral1511182177506ashharr,ValueFrom:nil,},EnvVar{Name:HG_INSTANCE_SECRET,Value:dea83588a727490d3795d736e53d48bbee310ae2,ValueFrom:nil,},EnvVar{Name:EXTRA_OPTIONS,Value:-profile -profile-port=6060 
-profile-addr=0.0.0.0,ValueFrom:nil,},EnvVar{Name:HG_CREATE_TIME_MS,Value:1715081284620,ValueFrom:nil,},EnvVar{Name:HG_PULL_POLICY,Value:Always,ValueFrom:nil,},EnvVar{Name:HG_START_REASON,Value:active,ValueFrom:nil,},EnvVar{Name:HGRUN_SECURE_PLUGINS,Value:false,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_RUNNER_ROOT_CA,Value:false,ValueFrom:nil,},EnvVar{Name:OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,Value:http://jaeger-agent.jaeger.svc.cluster.local:4317,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_PARAM,Value:1,ValueFrom:nil,},EnvVar{Name:OTEL_RESOURCE_ATTRIBUTES,Value:cluster=dev-us-central-0,namespace=hosted-grafana,ValueFrom:nil,},EnvVar{Name:HG_PROBE_PATH,Value:/api/health,ValueFrom:nil,},EnvVar{Name:HGRUN_EXIT_ON_PLUGIN_FAIL,Value:true,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_INSTALL_RETRIES,Value:2,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_INSTALL_CONCURRENCY,Value:1,ValueFrom:nil,},EnvVar{Name:HGRUN_LAUNCH_TIMEOUT,Value:3m0s,ValueFrom:nil,},EnvVar{Name:GOMEMLIMIT,Value:429496730,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{26 -3} {} 26m DecimalSI},memory: {{293601280 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/api/health,Port:{0 80 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:10,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/bin/hgrun check],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/hgrun drain -timeout 1m0s -waitTime 55s],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[SYS_PTRACE],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ephemeral1511182177506ashharr-grafana-7666b574d5-22xlb_hosted-grafana(dd9f7420-7d07-4def-b1b1-5f1d0a8a7ca8): ErrImagePull: [rpc error: code = NotFound desc = failed to pull and unpack image "us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2": failed to resolve reference "us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2": us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2: not found, failed to pull and unpack image "us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2": failed to resolve reference "us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/10.1.0-ephemeral-oss-77506-8314-2: 403 Forbidden] +E0507 11:59:37.413937 4589 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = failed to pull and unpack image 
\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/10.1.0-ephemeral-oss-77506-8314-2: 403 Forbidden" image="us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2" +time="2024-05-07T11:59:37.413721567Z" level=info msg="stop pulling image us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2: active requests=0, bytes read=4401" +time="2024-05-07T11:59:37.413617100Z" level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\" failed" error="failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/10.1.0-ephemeral-oss-77506-8314-2: 403 Forbidden" +audit: type=1400 audit(1715083177.403:79): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36490 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36492 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36492 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36492 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36490 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36490 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36490 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083177.399:78): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36484 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083177.399:77): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36484 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083177.399:76): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36484 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36484 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36484 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36484 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +time="2024-05-07T11:59:37.393546850Z" level=info msg="StartContainer for \"4346b5c7d89f0d5b24a4b190740ed7327d12365cbd86dd55f845a8416cb1824e\" returns 
successfully" +Started libcontainer container 4346b5c7d89f0d5b24a4b190740ed7327d12365cbd86dd55f845a8416cb1824e. +time="2024-05-07T11:59:37.344257745Z" level=info msg="StartContainer for \"4346b5c7d89f0d5b24a4b190740ed7327d12365cbd86dd55f845a8416cb1824e\"" +time="2024-05-07T11:59:37.343701838Z" level=info msg="CreateContainer within sandbox \"ac0defb47ab561e39c01453f80823086daf554758865a65d1cb608092c1539d5\" for &ContainerMetadata{Name:hgrun,Attempt:0,} returns container id \"4346b5c7d89f0d5b24a4b190740ed7327d12365cbd86dd55f845a8416cb1824e\"" +time="2024-05-07T11:59:37.335849942Z" level=info msg="CreateContainer within sandbox \"ac0defb47ab561e39c01453f80823086daf554758865a65d1cb608092c1539d5\" for container &ContainerMetadata{Name:hgrun,Attempt:0,}" +time="2024-05-07T11:59:37.334816429Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hgrun:0.1.452\"" +I0507 11:59:37.334399 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hgrun) is not from ACR, return empty authentication +time="2024-05-07T11:59:37.333654615Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hgrun:0.1.452\" returns image reference \"sha256:9fb1bce3e4a228f50768d21842cd7d7fafc1d586eaa0326c9d3c86d79a36868a\"" +time="2024-05-07T11:59:37.333615314Z" level=info msg="Pulled image \"us.gcr.io/hosted-grafana/hgrun:0.1.452\" with image id \"sha256:9fb1bce3e4a228f50768d21842cd7d7fafc1d586eaa0326c9d3c86d79a36868a\", repo tag \"us.gcr.io/hosted-grafana/hgrun:0.1.452\", repo digest \"us.gcr.io/hosted-grafana/hgrun@sha256:b492dbbbee9faf9dba63c9fd89e6f9e148239765454c6a54c4284a2828dec153\", size \"19109699\" in 3.751908524s" +time="2024-05-07T11:59:37.332685003Z" level=info msg="ImageCreate event name:\"us.gcr.io/hosted-grafana/hgrun@sha256:b492dbbbee9faf9dba63c9fd89e6f9e148239765454c6a54c4284a2828dec153\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:37.331316686Z" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/hgrun:0.1.452\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:37.329165060Z" level=info msg="ImageCreate event name:\"sha256:9fb1bce3e4a228f50768d21842cd7d7fafc1d586eaa0326c9d3c86d79a36868a\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:37.328382550Z" level=info msg="stop pulling image us.gcr.io/hosted-grafana/hgrun:0.1.452: active requests=0, bytes read=15714567" +time="2024-05-07T11:59:37.328138947Z" level=info msg="ImageCreate event name:\"us.gcr.io/hosted-grafana/hgrun:0.1.452\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +run-containerd-io.containerd.runtime.v2.task-k8s.io-6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408-rootfs.mount: Deactivated successfully. +cri-containerd-6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408.scope: Consumed 21.501s CPU time. +cri-containerd-6ad3e55547f2192f865518e75009243418b177091c1c781236e2ac8f0324b408.scope: Deactivated successfully. 
+E0507 11:59:37.252214 4736 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ksm\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=ksm pod=new-relic-nri-bundle-nrk8s-ksm-6c785668f5-jcxh2_integration(f7cc3cca-2ffb-4fde-a73e-a4ba8b0f6b3c)\"" pod="integration/new-relic-nri-bundle-nrk8s-ksm-6c785668f5-jcxh2" podUID="f7cc3cca-2ffb-4fde-a73e-a4ba8b0f6b3c" +I0507 11:59:37.251511 4736 scope.go:117] "RemoveContainer" containerID="dd4a230cd9a8d74428dafba521d8e57bdec75e75b6cb2c51d6f7b801d2bf0d0e" +time="2024-05-07T11:59:37.204502542Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\"" +E0507 11:59:37.204057 4589 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = NotFound desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\": us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2: not found" image="us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2" +time="2024-05-07T11:59:37.203800966Z" level=info msg="stop pulling image us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2: active requests=0, bytes read=6802" +time="2024-05-07T11:59:37.203750262Z" level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\" failed" error="rpc error: code = NotFound desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\": us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2: not found" +time="2024-05-07T11:59:37.202055445Z" level=info msg="trying next host - response was http.StatusNotFound" host=us.gcr.io +audit: type=1400 audit(1715083177.183:75): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36423 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083177.183:74): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36423 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083177.183:73): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36423 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36423 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36423 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36423 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:37.145947 4573 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-zbcz" status="Running" +I0507 11:59:37.134905 4734 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-5rdf" status="Running" +I0507 11:59:37.133005 3782 prober.go:107] "Probe failed" probeType="Readiness" 
pod="loki-dev-014/loki-dev-014-rollout-operator-58fc68b876-2qhmp" podUID="e6504036-2514-4ecc-b78c-c47061f60c9f" containerName="rollout-operator" probeResult="failure" output="HTTP probe failed with statuscode: 500" +run-containerd-runc-k8s.io-e5f17d69eee483ec8d43b26d5d628246984ba92f794ee5f3748935f5b6448b9b-runc.6eAyHn.mount: Deactivated successfully. +ll header: 00000000: 42 01 0a 80 00 8f 42 01 0a 80 00 01 08 00 +IPv4: martian source 10.132.135.59 from 10.132.135.75, on dev eth0 +ll header: 00000000: 42 01 0a 80 00 8f 42 01 0a 80 00 01 08 00 +IPv4: martian source 10.132.135.59 from 10.132.135.75, on dev eth0 +net_ratelimit: 2 callbacks suppressed +time="2024-05-07T11:59:37.051802942Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\"" +E0507 11:59:37.041999 4589 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.0.2\\\"\"" pod="hosted-grafana/johan6-grafana-796656fd69-6rfnm" podUID="f6fe6c71-2a0e-4797-bd0c-2b508d1287fe" +E0507 11:59:37.025280 4585 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6439-933-1\\\"\"" pod="hosted-grafana/ephemeral1180076306439dafyddt-grafana-85fd4854bb-q6874" podUID="d6e144f9-ae89-463b-bcc4-c52f6eb091f8" +E0507 11:59:37.025204 4585 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=pyroscopecanaries-grafana-57b7948fdb-6m6ww_hosted-grafana(2ccf8512-b42a-470c-a988-cc464917d285)\"" pod="hosted-grafana/pyroscopecanaries-grafana-57b7948fdb-6m6ww" podUID="2ccf8512-b42a-470c-a988-cc464917d285" +I0507 11:59:37.024249 4585 scope.go:117] "RemoveContainer" containerID="c71d355da3277fec1e90b52152383d7d4579dedbc22016e693d363830632f4c5" +E0507 11:59:36.999782 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pdc\" with ErrImageNeverPull: \"Container image \\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\" is not present with pull policy of Never\"" pod="pdc/private-datasource-connect-564fb6cfbb-fd2jh" podUID="ac6bc6d0-43a4-4885-9ee4-ba3441b0b527" +E0507 11:59:36.999725 4734 kuberuntime_manager.go:1256] container &Container{Name:pdc,Image:us.gcr.io/hosted-grafana/pdc:0.1.415,Command:[],Args:[-proxy.auth.ca-keys-dir=/var/run/secrets/pdc-certs -proxy.socks-server.addr=:10443 -proxy.ssh-server.addr=:2222 -proxy.use-socks-username-for-routing -proxy.api.http-address=:9182 -proxy.check-connpool-address-in-ring -memberlist.join=dns+gossip-ring.pdc.svc.cluster.local:7946 -api.http-address=:11443 -distributor.enabled=true -distributor.addr=:10444 -distributor.use-socks-username-for-routing -gateway.enabled=true -gateway.addr=:2244 -log.level=debug -certs.ca-private-key-file=/var/run/secrets/pdc-certs/ca.key -certs.ca-cert-file=/var/run/secrets/pdc-certs/ca.crt -certs.ca-pub-file=/var/run/secrets/pdc-certs/ca.pub -certs.cluster=local-k8s -shard-size=3 -graceful-shutdown-period=30s 
-enable-multiple-networks],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:socks,HostPort:0,ContainerPort:10443,Protocol:TCP,HostIP:,},ContainerPort{Name:ssh,HostPort:0,ContainerPort:2222,Protocol:TCP,HostIP:,},ContainerPort{Name:distributor,HostPort:0,ContainerPort:10444,Protocol:TCP,HostIP:,},ContainerPort{Name:gateway,HostPort:0,ContainerPort:2244,Protocol:TCP,HostIP:,},ContainerPort{Name:api,HostPort:0,ContainerPort:11443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{250 -3} {} 250m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:pdc-certs,ReadOnly:true,MountPath:/var/run/secrets/pdc-certs,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-pjvg4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{0 11443 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:40,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/sleep 5],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Never,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod private-datasource-connect-564fb6cfbb-fd2jh_pdc(ac6bc6d0-43a4-4885-9ee4-ba3441b0b527): ErrImageNeverPull: Container image "us.gcr.io/hosted-grafana/pdc:0.1.415" is not present with pull policy of Never +I0507 11:59:36.923242 4745 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-j2vt" status="Running" +audit: type=1400 audit(1715083176.899:72): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36386 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083176.899:71): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36386 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083176.899:70): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36386 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +kauditd_printk_skb: 41 callbacks suppressed +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36386 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36386 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36386 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" 
pid=36384 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36384 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36384 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:36.667375 2776 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=edwardtest-grafana-c4849b4b7-56ssx_hosted-grafana(eb8ab8ca-f0ef-4df3-923b-4f17718cd1ad)\"" pod="hosted-grafana/edwardtest-grafana-c4849b4b7-56ssx" podUID="eb8ab8ca-f0ef-4df3-923b-4f17718cd1ad" +I0507 11:59:36.666757 2776 scope.go:117] "RemoveContainer" containerID="ebebe8ca1b6122cea402a1cfb6fa758f2f8e849943cbf9c09506c00870d1d732" +E0507 11:59:36.604366 4586 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-enterprise-6372-855-1\\\"\"" pod="hosted-grafana/ephemeral1180076306372jacobso-grafana-8659d5f696-t5qdc" podUID="a0b71eed-f8bf-4875-9ddd-52a41e861a56" +E0507 11:59:36.604305 4586 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=ephemeral1180076306267marefr-grafana-7b4dcd6ddc-d9dx2_hosted-grafana(929d579c-9422-4ad1-989d-9e1edb203f70)\"" pod="hosted-grafana/ephemeral1180076306267marefr-grafana-7b4dcd6ddc-d9dx2" podUID="929d579c-9422-4ad1-989d-9e1edb203f70" +I0507 11:59:36.603479 4586 scope.go:117] "RemoveContainer" containerID="95667fbb3341dec0691e603540df17577797c88cb83c2871b860000d66c95429" +E0507 11:59:36.599130 4624 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6teststeady6-grafana-676df8f4d-2qkx4_hosted-grafana(b0e5bc5f-fa77-48da-a896-52c578fb915b)\"" pod="hosted-grafana/k6teststeady6-grafana-676df8f4d-2qkx4" podUID="b0e5bc5f-fa77-48da-a896-52c578fb915b" +I0507 11:59:36.598142 4624 scope.go:117] "RemoveContainer" containerID="e8101aad8fc0984550fb0d96d57c9e2fbc9938e7bf3fa2c25d1e6748dc3a61fc" +I0507 11:59:36.581938 4736 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-gwtz" status="Running" +E0507 11:59:36.562067 4595 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=grafana pod=ltest-grafana-74d66c7766-s2ftd_hosted-grafana(40e776a7-78a1-44d8-95ac-0c4944bb7737)\"" pod="hosted-grafana/ltest-grafana-74d66c7766-s2ftd" podUID="40e776a7-78a1-44d8-95ac-0c4944bb7737" +I0507 11:59:36.561272 4595 scope.go:117] "RemoveContainer" containerID="ea831b36e1cf141ea84a1158e1ac08c42bfe6220a73e5f2074dea1d25e9c8619" +I0507 11:59:36.560485 4595 kubelet.go:2498] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="hosted-grafana/ltest-grafana-74d66c7766-s2ftd" +E0507 11:59:36.539121 4590 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=grafana 
pod=k6teststeady2-grafana-7c76656c46-56dmp_hosted-grafana(724dd356-a32b-421e-b29b-b7ce1624a7f6)\"" pod="hosted-grafana/k6teststeady2-grafana-7c76656c46-56dmp" podUID="724dd356-a32b-421e-b29b-b7ce1624a7f6" +I0507 11:59:36.538134 4590 scope.go:117] "RemoveContainer" containerID="8f11d74c7851b1769f7304a41ab16112c93df08749d2241c55263fdf3731038d" +E0507 11:59:36.526633 4615 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-84322-18859-1\\\"\"" pod="hosted-grafana/ephemeral1511182184322yuritce-grafana-5fc4b65c7d-rj4p4" podUID="909719af-cfb6-4d8a-9893-f87f27af458a" +I0507 11:59:36.524260 4733 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kv65" status="Running" +ll header: 00000000: 42 01 0a 80 00 17 42 01 0a 80 00 01 08 00 +IPv4: martian source 10.132.141.91 from 10.132.141.80, on dev eth0 +XMT: Solicit on eth0, interval 108810ms. +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36373 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36373 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36373 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:36.334923 4603 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-perf-n2s8-0-1dd3-91689928-9f87" status="Running" +Started cri-containerd-95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9.scope. 
+I0507 11:59:36.251643 4763 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x94l" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36367 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36367 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36367 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +time="2024-05-07T11:59:36.179017215Z" level=info msg="StartContainer for \"95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9\"" +time="2024-05-07T11:59:36.177858616Z" level=info msg="CreateContainer within sandbox \"81e019a0248a0300a328fd59f9939c3eaa1b98aa7f325a7f6e00592633275ef6\" for &ContainerMetadata{Name:checkoutservice,Attempt:3417,} returns container id \"95bf586cd79d43120ff44582d4dbd2476de61744411f8515b9b2c527a41fd5d9\"" +I0507 11:59:36.156382 4710 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-89hx" status="Running" +E0507 11:59:36.151662 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-2w2wq_hosted-grafana(792f4b46-ea76-486f-917b-9603924d3303)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-2w2wq" podUID="792f4b46-ea76-486f-917b-9603924d3303" +I0507 11:59:36.150687 4572 scope.go:117] "RemoveContainer" containerID="064e5b87ec988ee1421ec77749112553895c547dee46288c0a5f0a525c69b0cf" +I0507 11:59:36.097430 4734 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vvgr" status="Running" +I0507 11:59:36.050423 4589 kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" pod="hosted-grafana/ephemeral1511182187382tskarhe-grafana-7c98d5b45f-6zzkh" +I0507 11:59:36.014609 3224 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" path="/var/lib/kubelet/pods/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9/volumes" +I0507 11:59:36.006474 4732 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hmlg" status="Running" +I0507 11:59:35.990519 4739 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-h8wf" status="Running" +E0507 11:59:35.928465 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="unmounted volumes=[custom-grafana-agent], unattached volumes=[], failed to process volumes=[]: context deadline exceeded" pod="loki-dev-010/custom-grafana-agent-856948968f-6jfks" podUID="17b244cc-ecb9-4fbc-beaa-8fa47fafe013" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36365 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36365 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36365 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" 
+time="2024-05-07T11:59:35.780801331Z" level=info msg="RemoveContainer for \"e3360016e710853e0218cca9ca86c8beda5f9ec6ff20d03df41db53900b992b5\" returns successfully" +I0507 11:59:35.776278 4729 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nrth" status="Running" +E0507 11:59:35.772687 4602 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6teststeady4-grafana-5c4f6cd55-hvn6k_hosted-grafana(a95be6bc-a7bc-48cb-8935-f7040f91f7f9)\"" pod="hosted-grafana/k6teststeady4-grafana-5c4f6cd55-hvn6k" podUID="a95be6bc-a7bc-48cb-8935-f7040f91f7f9" +time="2024-05-07T11:59:35.772296342Z" level=info msg="RemoveContainer for \"e3360016e710853e0218cca9ca86c8beda5f9ec6ff20d03df41db53900b992b5\"" +I0507 11:59:35.771829 4602 scope.go:117] "RemoveContainer" containerID="c6da2382101cc3ca3a9a6de7b86f62dfd7b344559c7e17cecfb83f1284783adb" +I0507 11:59:35.771222 4602 scope.go:117] "RemoveContainer" containerID="e3360016e710853e0218cca9ca86c8beda5f9ec6ff20d03df41db53900b992b5" +I0507 11:59:35.771184 4602 kubelet.go:2426] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/k6teststeady4-grafana-5c4f6cd55-hvn6k" event={"ID":"a95be6bc-a7bc-48cb-8935-f7040f91f7f9","Type":"ContainerDied","Data":"c6da2382101cc3ca3a9a6de7b86f62dfd7b344559c7e17cecfb83f1284783adb"} +I0507 11:59:35.771136 4602 generic.go:334] "Generic (PLEG): container finished" podID="a95be6bc-a7bc-48cb-8935-f7040f91f7f9" containerID="c6da2382101cc3ca3a9a6de7b86f62dfd7b344559c7e17cecfb83f1284783adb" exitCode=1 +E0507 11:59:35.706327 4602 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-85282-20418-1\\\"\"" pod="hosted-grafana/ephemeral1511182185282svenner-grafana-59d5c4d944-k5bkz" podUID="dab84f6b-fe11-4ee5-931a-332f264b2e9c" +E0507 11:59:35.705273 4602 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6testslow3-grafana-6b64c68f9-mtbkr_hosted-grafana(024c0105-5d91-4a11-80bf-c8d380cb5fb6)\"" pod="hosted-grafana/k6testslow3-grafana-6b64c68f9-mtbkr" podUID="024c0105-5d91-4a11-80bf-c8d380cb5fb6" +I0507 11:59:35.704599 4602 scope.go:117] "RemoveContainer" containerID="d9b2a552b0a50add51394b346ec1f55e55cdec603101ab963aecd48029badf68" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36363 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36363 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36363 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:35.674912 4773 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dqf8" status="Running" +E0507 11:59:35.613614 4602 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana 
pod=ephemeral1511182177667ryantxu-grafana-5d7fbd766b-ddcnh_hosted-grafana(1ecbad00-a542-411b-b8af-323a1f1fba79)\"" pod="hosted-grafana/ephemeral1511182177667ryantxu-grafana-5d7fbd766b-ddcnh" podUID="1ecbad00-a542-411b-b8af-323a1f1fba79" +I0507 11:59:35.612867 4602 scope.go:117] "RemoveContainer" containerID="5a800f214c789ee2b7060e17c9fe29e1fe5ed3f7067dd08bba14a005ec2e9bfd" +I0507 11:59:35.612332 4602 kubelet.go:2498] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="hosted-grafana/ephemeral1511182177667ryantxu-grafana-5d7fbd766b-ddcnh" +I0507 11:59:35.572523 4770 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-rvzj" status="Running" +XMT: Solicit on eth0, interval 117800ms. +E0507 11:59:35.487872 4724 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=aws-dev-app-0_integration(f9fb0aca-946a-4fdc-ba53-f48bfbc47103)\"" pod="integration/aws-dev-app-0" podUID="f9fb0aca-946a-4fdc-ba53-f48bfbc47103" +I0507 11:59:35.487049 4724 scope.go:117] "RemoveContainer" containerID="2a2e04c10ced384f8ccd26986b172c42fcee911abda737fd8129030cc6a556b3" +I0507 11:59:35.461697 4631 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-jfjs" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36361 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36361 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36361 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:35.349860 4598 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-645fd656b4-q2qth_hosted-grafana(24c22429-43c3-4ad6-8b79-d42030a9204d)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-645fd656b4-q2qth" podUID="24c22429-43c3-4ad6-8b79-d42030a9204d" +I0507 11:59:35.349342 4598 scope.go:117] "RemoveContainer" containerID="5cf3d07ffea04d45c8d68600b11896b2559d462b331d1a5c628c0163e21537c3" +I0507 11:59:35.294796 4772 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-sskn" status="Running" +I0507 11:59:35.248171 4735 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xstn" status="Running" +XMT: Solicit on eth0, interval 123690ms. 
+AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36359 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36359 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36359 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:35.152016 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 2m40s restarting failed container=grafana pod=ephemeral1511182179279sarahzi-grafana-7f464d7c8d-sqbkq_hosted-grafana(8d5619e6-389c-46a0-996f-8b20cdac2937)\"" pod="hosted-grafana/ephemeral1511182179279sarahzi-grafana-7f464d7c8d-sqbkq" podUID="8d5619e6-389c-46a0-996f-8b20cdac2937" +I0507 11:59:35.151138 4572 scope.go:117] "RemoveContainer" containerID="34efa66e260e563bd703e808d5efbda0e6f8d6882a5eb3982bcb1e4fd3826a13" +I0507 11:59:35.029584 3224 kubelet.go:2408] "SyncLoop REMOVE" source="api" pods=["hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4"] +E0507 11:59:35.024914 4585 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77487-8287-1\\\"\"" pod="hosted-grafana/ephemeral1511182177487torkelo-grafana-8589fc9df9-7795x" podUID="57dbe932-52d4-4144-9577-ecad74ad8d52" +I0507 11:59:35.017930 3224 kubelet.go:2414] "SyncLoop DELETE" source="api" pods=["hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4"] +time="2024-05-07T11:59:35.010488825Z" level=info msg="cleaning up dead shim" namespace=k8s.io +time="2024-05-07T11:59:35.010474358Z" level=warning msg="cleaning up after shim disconnected" id=c6da2382101cc3ca3a9a6de7b86f62dfd7b344559c7e17cecfb83f1284783adb namespace=k8s.io +time="2024-05-07T11:59:35.010405059Z" level=info msg="shim disconnected" id=c6da2382101cc3ca3a9a6de7b86f62dfd7b344559c7e17cecfb83f1284783adb namespace=k8s.io +I0507 11:59:34.965425 3224 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e"} err="failed to get container status \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\": rpc error: code = NotFound desc = an error occurred when try to find container \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\": not found" +E0507 11:59:34.965384 3224 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\": not found" containerID="c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e" +time="2024-05-07T11:59:34.965205507Z" level=error msg="ContainerStatus for \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" failed" error="rpc error: code = NotFound desc = an error occurred when try to find container \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\": not found" +I0507 11:59:34.964947 3224 scope.go:117] "RemoveContainer" containerID="c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e" +time="2024-05-07T11:59:34.964793584Z" level=info msg="RemoveContainer for 
\"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" returns successfully" +Removed slice libcontainer container kubepods-burstable-pod25cb986c_3d6c_4ed0_abf3_ee59ed6175f9.slice. +time="2024-05-07T11:59:34.959179289Z" level=info msg="RemoveContainer for \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\"" +I0507 11:59:34.956840 3224 scope.go:117] "RemoveContainer" containerID="c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e" +I0507 11:59:34.956820 3224 kubelet.go:2430] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4" event={"ID":"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9","Type":"ContainerDied","Data":"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a"} +I0507 11:59:34.956796 3224 kubelet.go:2430] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4" event={"ID":"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9","Type":"ContainerDied","Data":"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e"} +I0507 11:59:34.956755 3224 generic.go:334] "Generic (PLEG): container finished" podID="25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" containerID="c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e" exitCode=1 +I0507 11:59:34.936025 3224 reconciler_common.go:300] "Volume detached for volume \"pdc-certs\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\"" +I0507 11:59:34.935988 3224 reconciler_common.go:300] "Volume detached for volume \"kube-api-access-95j2t\" (UniqueName: \"kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\"" +I0507 11:59:34.935951 3224 reconciler_common.go:300] "Volume detached for volume \"gcs-serviceaccount\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount\") on node \"ip-10-60-2-58.us-east-2.compute.internal\" DevicePath \"\"" +E0507 11:59:34.923984 3027 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysqld-exporter\" with CreateContainerConfigError: \"secret \\\"testcrossplane-user-exporter\\\" not found\"" pod="crossplane-playground/testcrossplane-exporter-c67cfc58f-vbzl4" podUID="3d49134d-3378-4ec3-824c-5ff4ea2590a5" +E0507 11:59:34.923938 3027 kuberuntime_manager.go:1261] container 
&Container{Name:mysqld-exporter,Image:prom/mysqld-exporter:v0.13.0,Command:[],Args:[--collect.info_schema.innodb_metrics],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:MYSQL_USER,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:username,Optional:nil,},},},EnvVar{Name:MYSQL_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:password,Optional:nil,},},},EnvVar{Name:MYSQL_HOST,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:endpoint,Optional:nil,},},},EnvVar{Name:MYSQL_PORT,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:testcrossplane-user-exporter,},Key:port,Optional:nil,},},},EnvVar{Name:MYSQL_TLS_MODE,Value:preferred,ValueFrom:nil,},EnvVar{Name:DATA_SOURCE_NAME,Value:$(MYSQL_USER):$(MYSQL_PASSWORD)@tcp($(MYSQL_HOST):$(MYSQL_PORT))/?tls=$(MYSQL_TLS_MODE),ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dzx7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod testcrossplane-exporter-c67cfc58f-vbzl4_crossplane-playground(3d49134d-3378-4ec3-824c-5ff4ea2590a5): CreateContainerConfigError: secret "testcrossplane-user-exporter" not found +I0507 11:59:34.921271 4731 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7jmw" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36348 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36348 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36348 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:34.856101 4727 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana-render-security\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-security:0.1.181\\\"\"" pod="integration/grafana-render-service-cbff479fc-cj9tp" podUID="0e3114d1-2f3a-49d6-a71d-dbc75050d8e0" +I0507 11:59:34.855593 4771 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-s6kw" status="Running" +I0507 11:59:34.854084 4727 kubelet_pods.go:906] "Unable to retrieve pull secret, 
the image pull may not succeed." pod="integration/grafana-render-service-cbff479fc-cj9tp" secret="" err="secret \"us-gcr-io-hosted-grafana\" not found" +I0507 11:59:34.841447 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount" (OuterVolumeSpecName: "gcs-serviceaccount") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "gcs-serviceaccount". PluginName "kubernetes.io/secret", VolumeGidValue "" +I0507 11:59:34.841404 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t" (OuterVolumeSpecName: "kube-api-access-95j2t") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "kube-api-access-95j2t". PluginName "kubernetes.io/projected", VolumeGidValue "" +I0507 11:59:34.836955 3224 operation_generator.go:888] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs" (OuterVolumeSpecName: "pdc-certs") pod "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" (UID: "25cb986c-3d6c-4ed0-abf3-ee59ed6175f9"). InnerVolumeSpecName "pdc-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" +I0507 11:59:34.834835 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"gcs-serviceaccount\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-gcs-serviceaccount\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") " +I0507 11:59:34.834794 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"pdc-certs\" (UniqueName: \"kubernetes.io/secret/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-pdc-certs\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") " +I0507 11:59:34.834734 3224 reconciler_common.go:172] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95j2t\" (UniqueName: \"kubernetes.io/projected/25cb986c-3d6c-4ed0-abf3-ee59ed6175f9-kube-api-access-95j2t\") pod \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\" (UID: \"25cb986c-3d6c-4ed0-abf3-ee59ed6175f9\") " +run-containerd-io.containerd.runtime.v2.task-k8s.io-c6da2382101cc3ca3a9a6de7b86f62dfd7b344559c7e17cecfb83f1284783adb-rootfs.mount: Deactivated successfully. 
+E0507 11:59:34.739637 4738 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ruler\" with CreateContainerConfigError: \"secret \\\"ruler-alertmanager-token\\\" not found\"" pod="ge-metrics-federation/gem-mimir-ruler-bd7cbc8cb-fpvxg" podUID="f39fa140-2a71-4cba-bcb7-b37b2fafa343" +E0507 11:59:34.739556 4738 kuberuntime_manager.go:1256] container &Container{Name:ruler,Image:grafana/enterprise-metrics:v2.11.1,Command:[],Args:[-target=ruler -config.expand-env=true -config.file=/etc/mimir/mimir.yaml],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:9095,Protocol:TCP,HostIP:,},ContainerPort{Name:memberlist,HostPort:0,ContainerPort:7946,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:JAEGER_AGENT_HOST,Value:jaeger-agent.jaeger.svc.cluster.local.,ValueFrom:nil,},EnvVar{Name:JAEGER_TAGS,Value:namespace=ge-metrics-federation,cluster=dev-us-central-0,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_MANAGER_HOST_PORT,Value:http://jaeger-agent.jaeger.svc.cluster.local.:5778/sampling,ValueFrom:nil,},EnvVar{Name:GOOGLE_APPLICATION_CREDENTIALS,Value:/var/secrets/google/credentials.json,ValueFrom:nil,},EnvVar{Name:AM_TOKEN,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:ruler-alertmanager-token,},Key:token,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{134217728 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:gcs-credentials,ReadOnly:false,MountPath:/var/secrets/google/,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:config,ReadOnly:false,MountPath:/etc/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:license,ReadOnly:false,MountPath:/license,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:runtime-config,ReadOnly:false,MountPath:/var/mimir,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:storage,ReadOnly:false,MountPath:/data,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:active-queries,ReadOnly:false,MountPath:/active-query-tracker,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-xf5ns,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{1 0 http-metrics},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:45,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod gem-mimir-ruler-bd7cbc8cb-fpvxg_ge-metrics-federation(f39fa140-2a71-4cba-bcb7-b37b2fafa343): 
CreateContainerConfigError: secret "ruler-alertmanager-token" not found +cri-containerd-c6da2382101cc3ca3a9a6de7b86f62dfd7b344559c7e17cecfb83f1284783adb.scope: Consumed 15.899s CPU time. +cri-containerd-c6da2382101cc3ca3a9a6de7b86f62dfd7b344559c7e17cecfb83f1284783adb.scope: Deactivated successfully. +time="2024-05-07T11:59:34.707025668Z" level=info msg="StopPodSandbox for \"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\" returns successfully" +time="2024-05-07T11:59:34.706960850Z" level=info msg="TearDown network for sandbox \"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\" successfully" +I0507 11:59:34.703072 3224 kubelet.go:2414] "SyncLoop DELETE" source="api" pods=["hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4"] +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36346 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36346 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36346 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +time="2024-05-07T11:59:34.624594305Z" level=info msg="cleaning up dead shim" namespace=k8s.io +time="2024-05-07T11:59:34.624582051Z" level=warning msg="cleaning up after shim disconnected" id=c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a namespace=k8s.io +time="2024-05-07T11:59:34.624527314Z" level=info msg="shim disconnected" id=c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a namespace=k8s.io +E0507 11:59:34.604339 4586 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-84322-18859-1\\\"\"" pod="hosted-grafana/ephemeral1511182184322yuritce-grafana-84496d949-4gc7m" podUID="8b778f8a-7749-4b2f-ac6d-aafa8cd32119" +E0507 11:59:34.603913 4586 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=ephemeral1511182179279sarahzi-grafana-c5dc5d5-krs7z_hosted-grafana(baa23492-a636-4134-8908-391631902931)\"" pod="hosted-grafana/ephemeral1511182179279sarahzi-grafana-c5dc5d5-krs7z" podUID="baa23492-a636-4134-8908-391631902931" +I0507 11:59:34.603362 4586 scope.go:117] "RemoveContainer" containerID="8f3e15e7b4631f5c3b33aa44572fdcc313ba99fcd9e4e1998b464ab93624b2e7" +time="2024-05-07T11:59:34.592084495Z" level=info msg="Container to stop \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" must be in running or unknown state, current state \"CONTAINER_EXITED\"" +time="2024-05-07T11:59:34.592005066Z" level=info msg="StopPodSandbox for \"c605ad2cdc74c6b5288f2532ad71cce81a28ef6965f97a89ff6609deb825553a\"" +time="2024-05-07T11:59:34.591282703Z" level=info msg="StopContainer for \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" returns successfully" +time="2024-05-07T11:59:34.574306831Z" level=info msg="cleaning up dead shim" namespace=k8s.io +time="2024-05-07T11:59:34.574295800Z" level=warning msg="cleaning up after shim disconnected" id=c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e namespace=k8s.io +time="2024-05-07T11:59:34.574248328Z" level=info msg="shim disconnected" 
id=c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e namespace=k8s.io +E0507 11:59:34.573378 4595 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6teststeady2-grafana-bd47b47d4-28b6n_hosted-grafana(91385f28-bb54-4fd7-a913-0dea0a3361d4)\"" pod="hosted-grafana/k6teststeady2-grafana-bd47b47d4-28b6n" podUID="91385f28-bb54-4fd7-a913-0dea0a3361d4" +I0507 11:59:34.571158 4595 scope.go:117] "RemoveContainer" containerID="f3b57fb64bedb2f63a9a73a2c7083c0808d8c9e24dc89b6690b1477ae9bb6cab" +I0507 11:59:34.538435 4590 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." pod="grafana-apps/loki-datasource-grafana-app-fast-5f845744dd-tpmtm" secret="" err="secret \"dockerhub\" not found" +time="2024-05-07T11:59:34.520032214Z" level=info msg="Stop container \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" with signal terminated" +time="2024-05-07T11:59:34.519591759Z" level=info msg="StopContainer for \"c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e\" with timeout 30 (s)" +I0507 11:59:34.518822 3224 kuberuntime_container.go:745] "Killing container with a grace period" pod="hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4" podUID="25cb986c-3d6c-4ed0-abf3-ee59ed6175f9" containerName="hgapi" containerID="containerd://c91436db00920ec961b9d5d6b4859d80a912e862e34fb5c45d8a85684fe6a97e" gracePeriod=30 +I0507 11:59:34.518597 3224 kubelet.go:2414] "SyncLoop DELETE" source="api" pods=["hosted-grafana/hosted-grafana-api-7b6bd9b949-9csb4"] +I0507 11:59:34.501779 4602 kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" pod="hosted-grafana/k6teststeady4-grafana-5c4f6cd55-hvn6k" +I0507 11:59:34.453214 4736 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nbp9" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36336 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36336 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36336 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:34.353776 4585 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ErrImagePull: \"[rpc error: code = NotFound desc = failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\\\": failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\\\": us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1: not found, failed to pull and unpack image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\\\": failed to resolve reference \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\\\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/11.1.0-ephemeral-6436-938-1: 403 Forbidden]\"" pod="hosted-grafana/ephemeral1180076306436hairyhe-grafana-86cb5688bb-mtgwf" podUID="640ad907-3089-47b9-89c7-4b59e6b685a5" + while [ "$(pidof plugins-pause)" = "" ]; do sleep 0.5; done; + ln --force -s /proc/$(pidof 
hgrun-pause)/root/bin/hgrun /bin/hgrun; +E0507 11:59:34.353716 4585 kuberuntime_manager.go:1256] container &Container{Name:grafana,Image:us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1,Command:[/bin/sh],Args:[-c set -e; while [ "$(pidof hgrun-pause)" = "" ]; do sleep 0.5; done; + exec /bin/hgrun -log.level=debug launch -bundledPluginsManifest /proc/$(pidof plugins-pause)/root/manifest.json -bundledPluginsDir /proc/$(pidof plugins-pause)/root/plugins],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:80,Protocol:TCP,HostIP:,},ContainerPort{Name:grpc,HostPort:0,ContainerPort:10000,Protocol:TCP,HostIP:,},ContainerPort{Name:profiling,HostPort:0,ContainerPort:6060,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:HG_API,Value:http://hosted-grafana-api,ValueFrom:nil,},EnvVar{Name:HG_INSTANCE_SLUG,Value:ephemeral1180076306436hairyhe,ValueFrom:nil,},EnvVar{Name:HG_INSTANCE_SECRET,Value:67f2b6eeb2d75a30d764feee937887e7f735a612,ValueFrom:nil,},EnvVar{Name:EXTRA_OPTIONS,Value:-profile -profile-port=6060 -profile-addr=0.0.0.0,ValueFrom:nil,},EnvVar{Name:HG_CREATE_TIME_MS,Value:1715081910810,ValueFrom:nil,},EnvVar{Name:HG_PULL_POLICY,Value:Always,ValueFrom:nil,},EnvVar{Name:HG_START_REASON,Value:active,ValueFrom:nil,},EnvVar{Name:HGRUN_SECURE_PLUGINS,Value:false,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_RUNNER_ROOT_CA,Value:false,ValueFrom:nil,},EnvVar{Name:OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,Value:http://jaeger-agent.jaeger.svc.cluster.local:4317,ValueFrom:nil,},EnvVar{Name:JAEGER_SAMPLER_PARAM,Value:1,ValueFrom:nil,},EnvVar{Name:OTEL_RESOURCE_ATTRIBUTES,Value:cluster=dev-us-central-0,namespace=hosted-grafana,ValueFrom:nil,},EnvVar{Name:HG_PROBE_PATH,Value:/api/health,ValueFrom:nil,},EnvVar{Name:HGRUN_EXIT_ON_PLUGIN_FAIL,Value:true,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_INSTALL_RETRIES,Value:2,ValueFrom:nil,},EnvVar{Name:HGRUN_PLUGIN_INSTALL_CONCURRENCY,Value:1,ValueFrom:nil,},EnvVar{Name:HGRUN_LAUNCH_TIMEOUT,Value:3m0s,ValueFrom:nil,},EnvVar{Name:GOMEMLIMIT,Value:429496730,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{26 -3} {} 26m DecimalSI},memory: {{293601280 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/api/health,Port:{0 80 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:10,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/bin/hgrun check],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:30,PeriodSeconds:30,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/hgrun drain -timeout 1m0s -waitTime 
55s],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[SYS_PTRACE],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ephemeral1180076306436hairyhe-grafana-86cb5688bb-mtgwf_hosted-grafana(640ad907-3089-47b9-89c7-4b59e6b685a5): ErrImagePull: [rpc error: code = NotFound desc = failed to pull and unpack image "us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1": failed to resolve reference "us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1": us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1: not found, failed to pull and unpack image "us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1": failed to resolve reference "us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/11.1.0-ephemeral-6436-938-1: 403 Forbidden] +E0507 11:59:34.353426 4585 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = Unknown desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/11.1.0-ephemeral-6436-938-1: 403 Forbidden" image="us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1" +time="2024-05-07T11:59:34.353141479Z" level=info msg="stop pulling image us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1: active requests=0, bytes read=4402" +time="2024-05-07T11:59:34.353117156Z" level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\" failed" error="failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\": unexpected status from HEAD request to https://us.gcr.io/v2/hosted-grafana/hosted-grafana-pro/manifests/11.1.0-ephemeral-6436-938-1: 403 Forbidden" +E0507 11:59:34.183531 4578 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-80141-12952-1\\\"\"" pod="hosted-grafana/ephemeral1511182180141joshhun-grafana-945f746cd-t2dbn" podUID="5a9ab168-41a1-4427-a3c8-7f8eb23f7491" +time="2024-05-07T11:59:34.182459543Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\"" +E0507 11:59:34.182064 4585 remote_image.go:180] "PullImage from image service failed" err="rpc error: code = NotFound desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\": 
us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1: not found" image="us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1" +time="2024-05-07T11:59:34.181810210Z" level=info msg="stop pulling image us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1: active requests=0, bytes read=6802" +time="2024-05-07T11:59:34.181742330Z" level=error msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\" failed" error="rpc error: code = NotFound desc = failed to pull and unpack image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\": failed to resolve reference \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\": us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1: not found" +I0507 11:59:34.181632 2791 kubelet.go:2421] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/victor-grafana-7b7bb568cc-grflq" event={"ID":"1803645b-5526-41b4-bf88-271be4827277","Type":"ContainerStarted","Data":"8a23f9c88be86595adf482aed0c24902ce397f7b5cf55c300e9aa851a5717a0d"} +time="2024-05-07T11:59:34.180182414Z" level=info msg="trying next host - response was http.StatusNotFound" host=us.gcr.io +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36334 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36334 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36334 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:34.107464 4767 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-m294" status="Running" +E0507 11:59:34.041990 4589 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=securityops-grafana-54d8cdd94c-lcvh2_hosted-grafana(ae03848f-326b-447b-965c-2e247587fef2)\"" pod="hosted-grafana/securityops-grafana-54d8cdd94c-lcvh2" podUID="ae03848f-326b-447b-965c-2e247587fef2" +I0507 11:59:34.041393 4589 scope.go:117] "RemoveContainer" containerID="c92811a8440f91cd6b065d841e3d3c8ef00c139b1754d41629933446094f0e5b" +E0507 11:59:34.035350 2952 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=edwardtest-grafana-7c84958c48-7qx52_hosted-grafana(b2e4a53a-8d70-4a70-8d03-87fd797e5cab)\"" pod="hosted-grafana/edwardtest-grafana-7c84958c48-7qx52" podUID="b2e4a53a-8d70-4a70-8d03-87fd797e5cab" +time="2024-05-07T11:59:34.034812075Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6436-938-1\"" +I0507 11:59:34.034307 2952 scope.go:117] "RemoveContainer" containerID="731add8ea0b2e9fdf09aebec6431636580ef50b216e74393d429b5ab92e597b4" +I0507 11:59:34.033652 6250 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="kafka/kafka-broker-1" secret="" err="secret \"gcr\" not found" +I0507 11:59:33.939013 4589 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-r8v7" status="Running" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36332 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36332 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36332 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:33.849663 4730 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-9zxx" status="Running" +E0507 11:59:33.830567 4591 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6432-916-1\\\"\"" pod="hosted-grafana/ephemeral1180076306432stephan-grafana-6795bb9d45-sxvzw" podUID="9ed9522d-b242-40b1-907b-8c8e4122c711" +E0507 11:59:33.830166 4591 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=oncalldev-grafana-88bf96b75-tfwt2_hosted-grafana(457849b1-2806-4b8c-af1b-ba17047f5234)\"" pod="hosted-grafana/oncalldev-grafana-88bf96b75-tfwt2" podUID="457849b1-2806-4b8c-af1b-ba17047f5234" +I0507 11:59:33.829279 4591 scope.go:117] "RemoveContainer" containerID="9f599128c19622f3ecba55323008ef75bfddf1f84fef95a77368a4e46d0ff1f1" +I0507 11:59:33.812487 4733 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lgmg" status="Running" +I0507 11:59:33.738098 4624 kubelet.go:2498] "SyncLoop (probe)" probe="readiness" status="ready" pod="hosted-grafana/ephemeral1511182177076papagia-grafana-6b9bb47584-xp7pp" +E0507 11:59:33.737125 4624 prober.go:239] "Unable to write all bytes from execInContainer" err="short write" expectedBytes=11846 actualBytes=10240 +I0507 11:59:33.699298 4772 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8nfg" status="Running" +time="2024-05-07T11:59:33.674127305Z" level=info msg="StartContainer for \"8a23f9c88be86595adf482aed0c24902ce397f7b5cf55c300e9aa851a5717a0d\" returns successfully" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36330 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36330 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36330 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +Started libcontainer container 8a23f9c88be86595adf482aed0c24902ce397f7b5cf55c300e9aa851a5717a0d. 
+I0507 11:59:33.596434 4737 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-cndv" status="Running" +time="2024-05-07T11:59:33.590854700Z" level=info msg="StartContainer for \"8a23f9c88be86595adf482aed0c24902ce397f7b5cf55c300e9aa851a5717a0d\"" +time="2024-05-07T11:59:33.590318994Z" level=info msg="CreateContainer within sandbox \"346dc5e0c503a6ca1f0281e3b6f8e32563ebf8a61ed467b4d0dc5b4030a115b9\" for &ContainerMetadata{Name:grafana,Attempt:0,} returns container id \"8a23f9c88be86595adf482aed0c24902ce397f7b5cf55c300e9aa851a5717a0d\"" +time="2024-05-07T11:59:33.582971705Z" level=info msg="CreateContainer within sandbox \"346dc5e0c503a6ca1f0281e3b6f8e32563ebf8a61ed467b4d0dc5b4030a115b9\" for container &ContainerMetadata{Name:grafana,Attempt:0,}" +time="2024-05-07T11:59:33.581670690Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hgrun:0.1.452\"" +I0507 11:59:33.580418 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hgrun) is not from ACR, return empty authentication +time="2024-05-07T11:59:33.579768267Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" returns image reference \"sha256:0036b00b52fc547c944c1c820817d91fba6e20775cbf4e6c3e09ad2e682dbd73\"" +time="2024-05-07T11:59:33.579726466Z" level=info msg="Pulled image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" with image id \"sha256:0036b00b52fc547c944c1c820817d91fba6e20775cbf4e6c3e09ad2e682dbd73\", repo tag \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\", repo digest \"us.gcr.io/hosted-grafana/hosted-grafana-pro@sha256:0853965a142fb95648de3281a7c71de0d05fb51616bc32b523dc2f1da6ca06dc\", size \"173405048\" in 1.763700178s" +time="2024-05-07T11:59:33.579013658Z" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/hosted-grafana-pro@sha256:0853965a142fb95648de3281a7c71de0d05fb51616bc32b523dc2f1da6ca06dc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:33.577566840Z" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:33.576106723Z" level=info msg="ImageUpdate event name:\"sha256:0036b00b52fc547c944c1c820817d91fba6e20775cbf4e6c3e09ad2e682dbd73\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:33.575284113Z" level=info msg="stop pulling image us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397: active requests=0, bytes read=6802" +time="2024-05-07T11:59:33.575105610Z" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +E0507 11:59:33.570763 4595 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77506-8314-2\\\"\"" pod="hosted-grafana/ephemeral1511182177506ashharr-grafana-6898b9c678-h5xpg" podUID="4a9da7ba-6a16-487f-9604-4e89cbd0c918" +E0507 11:59:33.538814 4590 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-enterprise-6372-855-1\\\"\"" pod="hosted-grafana/ephemeral1180076306372jacobso-grafana-586dcfb48b-8v6t8" 
podUID="656b3334-568b-408d-9642-268560046977" +I0507 11:59:33.457749 4601 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-n9zk" status="Running" +I0507 11:59:33.431432 4645 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-cz4q" status="Running" +I0507 11:59:33.422254 1537502 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x28r" status="Running" +audit: type=1400 audit(1715083173.383:28): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36297 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36297 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36297 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36297 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:33.376763 4608 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-h7jr" status="Running" +E0507 11:59:33.361757 4600 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-h67p4_hosted-grafana(a3a055b2-b554-4a41-ab79-6f070495296f)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-h67p4" podUID="a3a055b2-b554-4a41-ab79-6f070495296f" +I0507 11:59:33.360932 4600 scope.go:117] "RemoveContainer" containerID="d4402a99374d43ba1e3df7dffe05b2fee099671c2e0f07bb729186a41bb549b6" +I0507 11:59:33.356171 4645 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-z2rp" status="Running" +I0507 11:59:33.219742 4739 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vlg7" status="Running" +E0507 11:59:33.183476 4578 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=ephemeral1511182177667ryantxu-grafana-548f6c9689-4gxks_hosted-grafana(27ef7bb4-75ed-4cd9-9b96-b76cb07bee6d)\"" pod="hosted-grafana/ephemeral1511182177667ryantxu-grafana-548f6c9689-4gxks" podUID="27ef7bb4-75ed-4cd9-9b96-b76cb07bee6d" +I0507 11:59:33.182939 4578 scope.go:117] "RemoveContainer" containerID="5ba7a32d2cc7cc82e8a982949ae158d1beb142061bddf3e9f69c2637ea65b1b0" +time="2024-05-07T11:59:33.095406969Z" level=info msg="RemoveContainer for \"15651d1ecdf9bd928944f9cfb0523042b518137a440bb97f7d75923beaede053\" returns successfully" +E0507 11:59:33.090176 4592 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-vqkzm_hosted-grafana(d3742b42-2b35-4c32-8267-7cf79bbcb441)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-vqkzm" podUID="d3742b42-2b35-4c32-8267-7cf79bbcb441" +time="2024-05-07T11:59:33.090002575Z" level=info 
msg="RemoveContainer for \"15651d1ecdf9bd928944f9cfb0523042b518137a440bb97f7d75923beaede053\"" +I0507 11:59:33.089270 4592 scope.go:117] "RemoveContainer" containerID="d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679" +I0507 11:59:33.088811 4592 scope.go:117] "RemoveContainer" containerID="15651d1ecdf9bd928944f9cfb0523042b518137a440bb97f7d75923beaede053" +I0507 11:59:33.088779 4592 kubelet.go:2426] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-vqkzm" event={"ID":"d3742b42-2b35-4c32-8267-7cf79bbcb441","Type":"ContainerDied","Data":"d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679"} +I0507 11:59:33.088730 4592 generic.go:334] "Generic (PLEG): container finished" podID="d3742b42-2b35-4c32-8267-7cf79bbcb441" containerID="d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679" exitCode=1 +I0507 11:59:33.063394 4734 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dc4k" status="Running" +E0507 11:59:33.042338 4589 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.0.2\\\"\"" pod="hosted-grafana/grafana10-grafana-6c9c7c5bc4-4f2gp" podUID="df8fc793-cd73-4984-8c12-9c4d527ff219" +I0507 11:59:33.039844 4732 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-9ghc" status="Running" +I0507 11:59:32.993425 4640 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-bkfl" status="Running" +ll header: 00000000: 42 01 0a 80 00 7c 42 01 0a 80 00 01 08 00 +IPv4: martian source 10.132.101.99 from 10.132.101.62, on dev eth0 +I0507 11:59:32.964140 4707 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-4qc8" status="Running" +I0507 11:59:32.940823 4739 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-p4tv" status="Running" +audit: type=1400 audit(1715083172.883:27): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36286 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083172.883:26): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36286 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083172.883:25): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36286 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36286 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36286 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36286 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +E0507 11:59:32.873442 3304 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"gcom-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/frontend-monitoring:6a8eb5a\\\"\"" 
pod="faro/update-usage-28487090-xg5bt" podUID="6e8f7589-7d91-47e6-9128-7ec922779773" +E0507 11:59:32.830094 4591 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6testslow4-grafana-b5879497c-p4zx9_hosted-grafana(7458c262-83d7-4b7b-b8d4-f95db30c3e39)\"" pod="hosted-grafana/k6testslow4-grafana-b5879497c-p4zx9" podUID="7458c262-83d7-4b7b-b8d4-f95db30c3e39" +I0507 11:59:32.829402 4591 scope.go:117] "RemoveContainer" containerID="fbad2b736b62c12c779231631d3eb82fde86d6095a21d2982d457c6801be9293" +I0507 11:59:32.776172 4647 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-gmgr" status="Running" +time="2024-05-07T11:59:32.755926053Z" level=info msg="CreateContainer within sandbox \"81e019a0248a0300a328fd59f9939c3eaa1b98aa7f325a7f6e00592633275ef6\" for container &ContainerMetadata{Name:checkoutservice,Attempt:3417,}" +I0507 11:59:32.739402 4527 scope.go:117] "RemoveContainer" containerID="cdcb0619adb8e55d353b1a804a08de63dd2991fcbb1799d499e675ecae655a8e" +E0507 11:59:32.724973 4592 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679 not found: not found" probeType="Readiness" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-vqkzm" podUID="d3742b42-2b35-4c32-8267-7cf79bbcb441" containerName="grafana" +E0507 11:59:32.724932 4592 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679 not found: not found" containerID="d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679" cmd=["/bin/hgrun","check"] +E0507 11:59:32.724328 4592 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679 not found: not found" containerID="d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679" cmd=["/bin/hgrun","check"] +time="2024-05-07T11:59:32.724767806Z" level=error msg="ExecSync for \"d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679 not found: not found" +time="2024-05-07T11:59:32.724147410Z" level=error msg="ExecSync for \"d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679 not found: not found" +time="2024-05-07T11:59:32.723400665Z" level=error msg="ExecSync for \"d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679\" failed" error="rpc error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679 not found: not found" + > +E0507 11:59:32.723675 4592 remote_runtime.go:496] "ExecSync cmd from runtime service failed" err="rpc 
error: code = NotFound desc = failed to exec in container: failed to load task: no running task found: task d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679 not found: not found" containerID="d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679" cmd=["/bin/hgrun","check"] + ts=2024-05-07T11:59:30.205006192Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:28.791795005Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:27.963570749Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:27.061148162Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:26.707407028Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:25.328552026Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:25.041749011Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:24.342974853Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:23.448651822Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:23.116820392Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:22.538898748Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:21.719913728Z level=error 
caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:21.1942729Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:20.302909822Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health +I0507 11:59:32.722792 4592 prober.go:107] "Probe failed" probeType="Readiness" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-vqkzm" podUID="d3742b42-2b35-4c32-8267-7cf79bbcb441" containerName="grafana" probeResult="failure" output=< + ts=2024-05-07T11:59:19.71203793Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:19.309394819Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:18.886442843Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:18.042358889Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:17.766602037Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:17.126424933Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:16.490369589Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:16.198616007Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:15.662620401Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get 
\"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:15.507043882Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:15.45251028Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:15.003702316Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:14.70689258Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:14.669296859Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:14.604778237Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:14.397222806Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:14.240589331Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:14.137013019Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:14.118516695Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health + ts=2024-05-07T11:59:14.087202317Z level=error caller=http_client.go:56 app=hgrun hgrun_version=0.1.453-59-gf3f63162a msg="request failed" error="Get \"http://127.0.0.1:3000/api/health\": dial tcp 127.0.0.1:3000: connect: connection refused" method=GET url=http://127.0.0.1:3000/api/health +time="2024-05-07T11:59:32.722555689Z" level=warning msg="cleaning up after shim disconnected" id=d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679 namespace=k8s.io 
+time="2024-05-07T11:59:32.722569380Z" level=info msg="cleaning up dead shim" namespace=k8s.io +time="2024-05-07T11:59:32.722469678Z" level=info msg="shim disconnected" id=d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679 namespace=k8s.io +I0507 11:59:32.710370 4646 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-7chh" status="Running" +E0507 11:59:32.704958 4602 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77487-8287-1\\\"\"" pod="hosted-grafana/ephemeral1511182177487torkelo-grafana-745789578d-jmj9h" podUID="144f91fd-76a2-4ca1-9e14-ba65fe8113da" +I0507 11:59:32.619794 4729 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6vzp" status="Running" +E0507 11:59:32.570199 4595 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 1m20s restarting failed container=grafana pod=ltest-grafana-74d66c7766-s2ftd_hosted-grafana(40e776a7-78a1-44d8-95ac-0c4944bb7737)\"" pod="hosted-grafana/ltest-grafana-74d66c7766-s2ftd" podUID="40e776a7-78a1-44d8-95ac-0c4944bb7737" +I0507 11:59:32.569510 4595 scope.go:117] "RemoveContainer" containerID="ea831b36e1cf141ea84a1158e1ac08c42bfe6220a73e5f2074dea1d25e9c8619" +E0507 11:59:32.562163 4739 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pdc\" with ErrImageNeverPull: \"Container image \\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\" is not present with pull policy of Never\"" pod="pdc/private-datasource-connect-564fb6cfbb-5k5n7" podUID="5ce47ae4-8558-422b-8246-7733512eeb96" +E0507 11:59:32.562098 4739 kuberuntime_manager.go:1256] container &Container{Name:pdc,Image:us.gcr.io/hosted-grafana/pdc:0.1.415,Command:[],Args:[-proxy.auth.ca-keys-dir=/var/run/secrets/pdc-certs -proxy.socks-server.addr=:10443 -proxy.ssh-server.addr=:2222 -proxy.use-socks-username-for-routing -proxy.api.http-address=:9182 -proxy.check-connpool-address-in-ring -memberlist.join=dns+gossip-ring.pdc.svc.cluster.local:7946 -api.http-address=:11443 -distributor.enabled=true -distributor.addr=:10444 -distributor.use-socks-username-for-routing -gateway.enabled=true -gateway.addr=:2244 -log.level=debug -certs.ca-private-key-file=/var/run/secrets/pdc-certs/ca.key -certs.ca-cert-file=/var/run/secrets/pdc-certs/ca.crt -certs.ca-pub-file=/var/run/secrets/pdc-certs/ca.pub -certs.cluster=local-k8s -shard-size=3 -graceful-shutdown-period=30s -enable-multiple-networks],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:socks,HostPort:0,ContainerPort:10443,Protocol:TCP,HostIP:,},ContainerPort{Name:ssh,HostPort:0,ContainerPort:2222,Protocol:TCP,HostIP:,},ContainerPort{Name:distributor,HostPort:0,ContainerPort:10444,Protocol:TCP,HostIP:,},ContainerPort{Name:gateway,HostPort:0,ContainerPort:2244,Protocol:TCP,HostIP:,},ContainerPort{Name:api,HostPort:0,ContainerPort:11443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{250 -3} {} 250m 
DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:pdc-certs,ReadOnly:true,MountPath:/var/run/secrets/pdc-certs,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-fcx2w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{0 11443 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:40,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/sleep 5],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Never,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod private-datasource-connect-564fb6cfbb-5k5n7_pdc(5ce47ae4-8558-422b-8246-7733512eeb96): ErrImageNeverPull: Container image "us.gcr.io/hosted-grafana/pdc:0.1.415" is not present with pull policy of Never +ll header: 00000000: 42 01 0a 80 00 17 42 01 0a 80 00 01 08 00 +IPv4: martian source 10.132.141.91 from 10.132.141.80, on dev eth0 +I0507 11:59:32.426569 4734 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dkbf" status="Running" +I0507 11:59:32.409568 581823 cache.go:40] re-using cached key and certificate +I0507 11:59:32.405906 4578 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-5nj8" status="Running" +audit: type=1400 audit(1715083172.379:24): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36276 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083172.379:23): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36276 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083172.379:22): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36276 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36276 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36276 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36276 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:32.358966 4732 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hmlg" status="Running" +I0507 11:59:32.223604 4646 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-hzp5" status="Running" +I0507 11:59:32.173852 2791 kubelet.go:2421] "SyncLoop (PLEG): event for pod" pod="hosted-grafana/dafdeveuwest2-grafana-546fbd789d-czx47" 
event={"ID":"fc6ba4ea-9950-4999-8ad2-bdc9a577fb34","Type":"ContainerStarted","Data":"7418e5784964048801b0cb8abacd0a73f4a208454fc6f5418e4f79906761c98d"} +E0507 11:59:32.151882 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=e2cmigrationreceiver-grafana-6b9cf7d5c6-wsxm5_hosted-grafana(3f83ab4b-f255-4a22-a690-9f1e9b086226)\"" pod="hosted-grafana/e2cmigrationreceiver-grafana-6b9cf7d5c6-wsxm5" podUID="3f83ab4b-f255-4a22-a690-9f1e9b086226" +I0507 11:59:32.151224 4572 scope.go:117] "RemoveContainer" containerID="0339d9823b658e820b2d0535c744d982f15c63ca658cdaa9f690efd7dffdbf8c" +I0507 11:59:32.131299 4736 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-gwtz" status="Running" +I0507 11:59:32.076846 4726 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-n56x" status="Running" +I0507 11:59:32.057206 4726 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7h6b" status="Running" +E0507 11:59:31.990062 2791 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"goldpinger\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=goldpinger pod=goldpinger-69c2w_goldpinger(13276978-61bf-463a-b871-d3b5a2562070)\"" pod="goldpinger/goldpinger-69c2w" podUID="13276978-61bf-463a-b871-d3b5a2562070" +I0507 11:59:31.989696 2791 scope.go:117] "RemoveContainer" containerID="188fa7a825c7d671b5c324a4e63725c3039f85bda51fb56794e12823e6d07729" +E0507 11:59:31.928148 4734 pod_workers.go:1300] "Error syncing pod, skipping" err="unmounted volumes=[terraform-drift-detector-data], unattached volumes=[terraform-drift-detector-data], failed to process volumes=[]: context deadline exceeded" pod="terraform-drift-detector/terraform-drift-detector-d68b4c545-jg2vj" podUID="6c607496-ef26-454e-b2f2-4cb75b233fa3" +E0507 11:59:31.923713 4643 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-j7jh6_hosted-grafana(83fb0f38-728e-4050-9500-6ac9fc9f21c8)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-j7jh6" podUID="83fb0f38-728e-4050-9500-6ac9fc9f21c8" +I0507 11:59:31.923176 4643 scope.go:117] "RemoveContainer" containerID="a85b6a771be0a2165463617e0c7a4f5b42dbb5c232c57166f32a72d969a25bf1" +E0507 11:59:31.887809 4597 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.0.2\\\"\"" pod="hosted-grafana/johangrafana10-grafana-69c6449bbd-k2bgp" podUID="bb953c26-c201-4082-9b56-85ab12c1d0e1" +E0507 11:59:31.886415 4597 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6testslow6-grafana-65f9d6559b-xrs26_hosted-grafana(5c426b18-2b38-44ce-a92e-e5eeadbbb6f0)\"" pod="hosted-grafana/k6testslow6-grafana-65f9d6559b-xrs26" podUID="5c426b18-2b38-44ce-a92e-e5eeadbbb6f0" +I0507 11:59:31.885717 4597 scope.go:117] "RemoveContainer" containerID="107669a02b8d89f8f768181e2b8d64c839a1161c10d217fb0c3a2701beb32b72" 
+time="2024-05-07T11:59:31.883468402Z" level=info msg="StartContainer for \"7418e5784964048801b0cb8abacd0a73f4a208454fc6f5418e4f79906761c98d\" returns successfully" +audit: type=1400 audit(1715083171.875:21): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36274 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083171.875:20): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36274 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +audit: type=1400 audit(1715083171.875:19): apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36274 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36274 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36274 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +AVC apparmor="DENIED" operation="ptrace" profile="cri-containerd.apparmor.d" pid=36274 comm="pidof" requested_mask="read" denied_mask="read" peer="unconfined" +I0507 11:59:31.879715 4734 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xwpk" status="Running" +Started libcontainer container 7418e5784964048801b0cb8abacd0a73f4a208454fc6f5418e4f79906761c98d. +time="2024-05-07T11:59:31.833222796Z" level=info msg="StartContainer for \"7418e5784964048801b0cb8abacd0a73f4a208454fc6f5418e4f79906761c98d\"" +time="2024-05-07T11:59:31.832712390Z" level=info msg="CreateContainer within sandbox \"ac0defb47ab561e39c01453f80823086daf554758865a65d1cb608092c1539d5\" for &ContainerMetadata{Name:grafana,Attempt:0,} returns container id \"7418e5784964048801b0cb8abacd0a73f4a208454fc6f5418e4f79906761c98d\"" +time="2024-05-07T11:59:31.818485118Z" level=info msg="CreateContainer within sandbox \"ac0defb47ab561e39c01453f80823086daf554758865a65d1cb608092c1539d5\" for container &ContainerMetadata{Name:grafana,Attempt:0,}" +time="2024-05-07T11:59:31.815983488Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\"" +I0507 11:59:31.815514 2791 azure_credentials.go:220] image(us.gcr.io/hosted-grafana/hosted-grafana-pro) is not from ACR, return empty authentication +time="2024-05-07T11:59:31.814769473Z" level=info msg="PullImage \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" returns image reference \"sha256:0036b00b52fc547c944c1c820817d91fba6e20775cbf4e6c3e09ad2e682dbd73\"" +time="2024-05-07T11:59:31.814727873Z" level=info msg="Pulled image \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" with image id \"sha256:0036b00b52fc547c944c1c820817d91fba6e20775cbf4e6c3e09ad2e682dbd73\", repo tag \"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\", repo digest \"us.gcr.io/hosted-grafana/hosted-grafana-pro@sha256:0853965a142fb95648de3281a7c71de0d05fb51616bc32b523dc2f1da6ca06dc\", size \"173405048\" in 14.680303992s" +time="2024-05-07T11:59:31.813758661Z" level=info msg="ImageCreate event name:\"us.gcr.io/hosted-grafana/hosted-grafana-pro@sha256:0853965a142fb95648de3281a7c71de0d05fb51616bc32b523dc2f1da6ca06dc\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:31.811392832Z" level=info msg="ImageUpdate event name:\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" 
labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:31.809798213Z" level=info msg="ImageCreate event name:\"sha256:0036b00b52fc547c944c1c820817d91fba6e20775cbf4e6c3e09ad2e682dbd73\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +time="2024-05-07T11:59:31.808692900Z" level=info msg="stop pulling image us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397: active requests=0, bytes read=173418678" +time="2024-05-07T11:59:31.807327183Z" level=info msg="ImageCreate event name:\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-70397\" labels:{key:\"io.cri-containerd.image\" value:\"managed\"}" +I0507 11:59:31.729843 4735 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-m7wp" status="Running" +var-lib-containerd-tmpmounts-containerd\x2dmount4071253084.mount: Deactivated successfully. +E0507 11:59:31.705263 4602 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-4gclf_hosted-grafana(fe493f66-8d1f-4435-9208-0304fd499ee1)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-4gclf" podUID="fe493f66-8d1f-4435-9208-0304fd499ee1" +I0507 11:59:31.704465 4602 scope.go:117] "RemoveContainer" containerID="2773338620ccfb32536d17788865e6fd4c7de7250ab31a7922195ffc1387ee5f" +I0507 11:59:31.624527 4600 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-v7l7" status="Running" +I0507 11:59:31.620358 6247 prober.go:107] "Probe failed" probeType="Readiness" pod="grafana-agent/grafana-agent-helm-4" podUID="c36c5200-1cd6-4093-893c-c022f91af996" containerName="grafana-agent" probeResult="failure" output="Get \"http://10.0.99.125:3090/-/ready\": dial tcp 10.0.99.125:3090: connect: connection refused" +I0507 11:59:31.619462 4733 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dd5b" status="Running" +I0507 11:59:31.617463 4733 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dd5b" status="Running" +E0507 11:59:31.554203 4531 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"frontend\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=frontend pod=otel-demo-alt-dev-frontend-79ccf98858-mbj4x_otel-demo-alt(d08e620e-00d0-49f1-a195-820a62e8de8f)\"" pod="otel-demo-alt/otel-demo-alt-dev-frontend-79ccf98858-mbj4x" podUID="d08e620e-00d0-49f1-a195-820a62e8de8f" +I0507 11:59:31.553381 4531 scope.go:117] "RemoveContainer" containerID="30500dc79eb03686dd9399cf180582d080070e4a1e9445f98eea7d7867b7bc3d" +run-containerd-io.containerd.runtime.v2.task-k8s.io-d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679-rootfs.mount: Deactivated successfully. +cri-containerd-d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679.scope: Consumed 18.147s CPU time. +cri-containerd-d53fbb23caf1e92d73b7ccf3a991c2ccd1d1b1ef072bfb1f6798a781d3809679.scope: Deactivated successfully. 
+E0507 11:59:31.468693 3315 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"gcom-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/frontend-monitoring:6a8eb5a\\\"\"" pod="faro/update-usage-28487080-9sqzn" podUID="2cc85139-2f31-44ae-a308-3dc0df893592" +E0507 11:59:31.363226 4601 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6439-933-1\\\"\"" pod="hosted-grafana/ephemeral1180076306439dafyddt-grafana-7bcdd45ddc-l5xtr" podUID="57291357-8942-4110-8df1-c23b055d53d6" +I0507 11:59:31.298370 4772 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-2dqk" status="Running" +I0507 11:59:31.194140 4733 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lgmg" status="Running" +XMT: Solicit on eth0, interval 117900ms. +I0507 11:59:31.152952 4764 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-1-main-n2s16-1-1dd-97837cc3-k49c" status="Running" +I0507 11:59:31.141456 1970964 cache.go:40] re-using cached key and certificate +I0507 11:59:31.014439 4730 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-d88q" status="Running" +RCV: Reply message on eth0 from fe80::e9:7eff:fedf:3d37. +XMT: Renew on eth0, interval 9700ms. +PRC: Renewing lease on eth0. +E0507 11:59:30.965946 4731 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"overrides-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/kubernetes-dev/enterprise-logs:callum-shard-firstlast-08\\\"\"" pod="loki-dev-010/overrides-exporter-98c77fd66-6zj6m" podUID="1ff5bf3e-5856-4f6f-ae04-273f2dee170b" +I0507 11:59:30.936319 4607 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-mzxx" status="Running" +E0507 11:59:30.925932 4733 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=prometheus pod=bryan-prometheus-0_bryan-prometheus(6dadfe71-eb19-4231-a96e-c64bb5499a1e)\"" pod="bryan-prometheus/bryan-prometheus-0" podUID="6dadfe71-eb19-4231-a96e-c64bb5499a1e" +I0507 11:59:30.925416 4733 scope.go:117] "RemoveContainer" containerID="f0f5ac8b5f4dba0a416c838dd7ccfa903bd1ca22e36ebc4d98a29b4e646063c6" +I0507 11:59:30.908672 4724 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-j5wp" status="Running" +I0507 11:59:30.893285 4737 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-h9sj" status="Running" +E0507 11:59:30.886609 4597 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=ephemeral1511182177667ryantxu-grafana-75c5c488b7-4lj5v_hosted-grafana(44a19bfe-3d16-48c2-ad37-08eb74fc6637)\"" pod="hosted-grafana/ephemeral1511182177667ryantxu-grafana-75c5c488b7-4lj5v" podUID="44a19bfe-3d16-48c2-ad37-08eb74fc6637" +I0507 11:59:30.885963 4597 scope.go:117] "RemoveContainer" 
containerID="004f450ab68ac54937e0695bf2ff318d6219fb3fc4afe1b7fae7346c7f7f962d" +I0507 11:59:30.862910 4609 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-kpwx" status="Running" +E0507 11:59:30.829525 4591 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6teststeady5-grafana-68bd494c65-2q4h8_hosted-grafana(18b85a56-363f-4d78-aef4-541eb20108bf)\"" pod="hosted-grafana/k6teststeady5-grafana-68bd494c65-2q4h8" podUID="18b85a56-363f-4d78-aef4-541eb20108bf" +I0507 11:59:30.828924 4591 scope.go:117] "RemoveContainer" containerID="c337186d90d7c7bc46e7ddfed3c4831486b74fa243b590c20dd29bb87bb7b93b" +I0507 11:59:30.770697 4739 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-78dr" status="Running" +I0507 11:59:30.762441 4773 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hcwk" status="Running" +E0507 11:59:30.604771 4586 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=dev05devuseast0test-grafana-6cb68b9788-v8dgd_hosted-grafana(59ef7574-134f-4888-826e-9a22062f29f8)\"" pod="hosted-grafana/dev05devuseast0test-grafana-6cb68b9788-v8dgd" podUID="59ef7574-134f-4888-826e-9a22062f29f8" +I0507 11:59:30.604104 4586 scope.go:117] "RemoveContainer" containerID="c1992a17a0b5dc3d80080fcc1602d9481f2b4259ab708628828de7f34211f199" +E0507 11:59:30.570213 4595 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=ephemeral1180076306267marefr-grafana-78764cf8d9-vpztz_hosted-grafana(a78403f0-4ce8-4320-9df1-0d15e427b4a1)\"" pod="hosted-grafana/ephemeral1180076306267marefr-grafana-78764cf8d9-vpztz" podUID="a78403f0-4ce8-4320-9df1-0d15e427b4a1" +I0507 11:59:30.569190 4595 scope.go:117] "RemoveContainer" containerID="0b227353407956e7e3fcf7752ca1eec752856d7e36ca37dcc004e2cc7a749079" +ll header: 00000000: 42 01 0a 80 00 17 42 01 0a 80 00 01 08 00 +IPv4: martian source 10.132.141.91 from 10.132.141.80, on dev eth0 +I0507 11:59:30.367585 4601 kubelet_volumes.go:161] "Cleaned up orphaned pod volumes dir" podUID="10bdda8a-7f0b-466e-9c81-045fb5150dc4" path="/var/lib/kubelet/pods/10bdda8a-7f0b-466e-9c81-045fb5150dc4/volumes" +E0507 11:59:30.363662 4601 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-85282-20418-1\\\"\"" pod="hosted-grafana/ephemeral1511182185282svenner-grafana-6f6b6f4d85-9xlcc" podUID="fee4a5b2-d22d-4d80-8041-8796a997679a" +XMT: Solicit on eth0, interval 130040ms. 
+I0507 11:59:30.304754 4779 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nx8q" status="Running" +I0507 11:59:30.200842 4769 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kvxj" status="Running" +I0507 11:59:30.198170 4748 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-databenchloki-n2-8c6b6266-2tvt" status="Running" +E0507 11:59:30.152725 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-enterprise-6372-855-1\\\"\"" pod="hosted-grafana/ephemeral1180076306372jacobso-grafana-7f66f49b8d-kzhxd" podUID="7ac84154-783b-4672-b865-f728da592129" +E0507 11:59:30.092903 4724 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cortex-gw\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=cortex-gw pod=cortex-gw-6f7f764f94-rgtw8_faro(d6bf8bcc-35b9-4c1f-ab69-f857a2328d11)\"" pod="faro/cortex-gw-6f7f764f94-rgtw8" podUID="d6bf8bcc-35b9-4c1f-ab69-f857a2328d11" +I0507 11:59:30.092498 4724 scope.go:117] "RemoveContainer" containerID="60da1d466a5340942033d5d688a2f4ad116039a5035b5b6f8233fd240d6472bf" +E0507 11:59:30.042197 4589 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-56b7c6b6df-nj27g_hosted-grafana(212d6baa-7068-4ad2-9617-f67f010e866d)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-56b7c6b6df-nj27g" podUID="212d6baa-7068-4ad2-9617-f67f010e866d" +I0507 11:59:30.041381 4589 scope.go:117] "RemoveContainer" containerID="efb5462666d496e154e0477e0540b5325157c76f784e16834d1ab78c4fce2815" +I0507 11:59:29.861354 4531 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-otel-alt-n2s4-0--3cf760c5-s8l4" status="Running" +I0507 11:59:29.809996 4602 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-7pn8" status="Running" +E0507 11:59:29.725681 3089 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"azure-resourcemanager-exporter\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=azure-resourcemanager-exporter pod=azure-resourcemanager-exporter-6b5b58c666-rsttd_infra-exporters(5a95f801-309c-4f33-864a-406262c6ece6)\"" pod="infra-exporters/azure-resourcemanager-exporter-6b5b58c666-rsttd" podUID="5a95f801-309c-4f33-864a-406262c6ece6" +I0507 11:59:29.725405 3089 scope.go:117] "RemoveContainer" containerID="fc52eb9945ce8a690b931db46692a6dd0bd9595808feb29e404ffd565f685f84" +E0507 11:59:29.722713 4732 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"support-agent\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=support-agent pod=support-agent-557dff8b77-c6f8b_support-agent(ede5a224-96fb-45d0-b452-1eb2de73cf19)\"" pod="support-agent/support-agent-557dff8b77-c6f8b" podUID="ede5a224-96fb-45d0-b452-1eb2de73cf19" +I0507 11:59:29.722345 4732 scope.go:117] "RemoveContainer" containerID="e0a235a59cc57d2dbbcab276b25c7bb1bab9cecc37697779748125072457736f" +E0507 11:59:29.667989 2776 pod_workers.go:1300] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"goldpinger\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=goldpinger pod=goldpinger-dw7wf_goldpinger(7dc39876-4602-45e9-a701-c9f8bf8c70b9)\"" pod="goldpinger/goldpinger-dw7wf" podUID="7dc39876-4602-45e9-a701-c9f8bf8c70b9" +I0507 11:59:29.667381 2776 scope.go:117] "RemoveContainer" containerID="6f49a440ca8bc4e796384c08cafe8a9402ece7910a5413cb95d8c4fc808e86cd" +I0507 11:59:29.656819 4742 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-qqlx" status="Running" +E0507 11:59:29.603893 4586 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6432-916-1\\\"\"" pod="hosted-grafana/ephemeral1180076306432stephan-grafana-696d787664-jftqh" podUID="41fba902-127b-4514-b1ca-ed431bc59a6c" +E0507 11:59:29.570535 4595 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=ephemeral1511182179279sarahzi-grafana-c8fbf74dd-cnskb_hosted-grafana(b2b8b8d9-9323-467d-99f5-e3289720a333)\"" pod="hosted-grafana/ephemeral1511182179279sarahzi-grafana-c8fbf74dd-cnskb" podUID="b2b8b8d9-9323-467d-99f5-e3289720a333" +I0507 11:59:29.569915 4595 scope.go:117] "RemoveContainer" containerID="e223c471263c29a926b1319ae96b0ca116e3668d27011b6bc6fa5adebc0558c5" +I0507 11:59:29.550166 4735 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lngb" status="Running" +E0507 11:59:29.538430 4590 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-85282-20418-1\\\"\"" pod="hosted-grafana/ephemeral1511182185282svenner-grafana-6944cbdfcc-64z2p" podUID="1abeccba-cc20-47a4-b55c-fff4b7decbe1" +E0507 11:59:29.538063 4590 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=k6teststeady6-grafana-67b679bd8c-l7knf_hosted-grafana(c5975fd5-22d7-4efb-a6b6-3064876188c1)\"" pod="hosted-grafana/k6teststeady6-grafana-67b679bd8c-l7knf" podUID="c5975fd5-22d7-4efb-a6b6-3064876188c1" +I0507 11:59:29.537415 4590 scope.go:117] "RemoveContainer" containerID="5b8aad8ab95e5f4201702424140d73f5cc582d6d48583a31ca0b0dabea27d806" +I0507 11:59:29.434503 1119040 cache.go:40] re-using cached key and certificate +ll header: 00000000: 42 01 0a 80 00 17 42 01 0a 80 00 01 08 00 +IPv4: martian source 10.132.141.91 from 10.132.141.80, on dev eth0 +I0507 11:59:29.408118 4734 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-databenchloki-n2-8c6b6266-bz78" status="Running" +I0507 11:59:29.320184 1537502 kubelet_pods.go:906] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="logs-endpoint-dev-005/kafka-controller-0" secret="" err="secret \"not-needed\" not found" +E0507 11:59:29.151970 4572 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:11.1.0-ephemeral-6439-933-1\\\"\"" pod="hosted-grafana/ephemeral1180076306439dafyddt-grafana-9769b9f5-g5qqf" podUID="e6633496-a926-4a28-8db8-6405d33cb4bc" +I0507 11:59:29.142840 4763 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x94l" status="Running" +E0507 11:59:28.939546 3659 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cortex-gw\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=cortex-gw pod=cortex-gw-74f78948ff-9pcl6_faro(643043e2-707a-4a3f-adf3-08beab1d1ea7)\"" pod="faro/cortex-gw-74f78948ff-9pcl6" podUID="643043e2-707a-4a3f-adf3-08beab1d1ea7" +I0507 11:59:28.939111 3659 scope.go:117] "RemoveContainer" containerID="9940112c30fda42aa2b814faddfc969d9a2328ae70ecb9b858d75aa6f8b61483" +E0507 11:59:28.925475 4733 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"pdc\" with ErrImageNeverPull: \"Container image \\\"us.gcr.io/hosted-grafana/pdc:0.1.415\\\" is not present with pull policy of Never\"" pod="pdc/private-datasource-connect-564fb6cfbb-l8pgv" podUID="57e4a0cb-5e77-47bd-b277-70f4b1512c44" +E0507 11:59:28.925419 4733 kuberuntime_manager.go:1256] container &Container{Name:pdc,Image:us.gcr.io/hosted-grafana/pdc:0.1.415,Command:[],Args:[-proxy.auth.ca-keys-dir=/var/run/secrets/pdc-certs -proxy.socks-server.addr=:10443 -proxy.ssh-server.addr=:2222 -proxy.use-socks-username-for-routing -proxy.api.http-address=:9182 -proxy.check-connpool-address-in-ring -memberlist.join=dns+gossip-ring.pdc.svc.cluster.local:7946 -api.http-address=:11443 -distributor.enabled=true -distributor.addr=:10444 -distributor.use-socks-username-for-routing -gateway.enabled=true -gateway.addr=:2244 -log.level=debug -certs.ca-private-key-file=/var/run/secrets/pdc-certs/ca.key -certs.ca-cert-file=/var/run/secrets/pdc-certs/ca.crt -certs.ca-pub-file=/var/run/secrets/pdc-certs/ca.pub -certs.cluster=local-k8s -shard-size=3 -graceful-shutdown-period=30s -enable-multiple-networks],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:socks,HostPort:0,ContainerPort:10443,Protocol:TCP,HostIP:,},ContainerPort{Name:ssh,HostPort:0,ContainerPort:2222,Protocol:TCP,HostIP:,},ContainerPort{Name:distributor,HostPort:0,ContainerPort:10444,Protocol:TCP,HostIP:,},ContainerPort{Name:gateway,HostPort:0,ContainerPort:2244,Protocol:TCP,HostIP:,},ContainerPort{Name:api,HostPort:0,ContainerPort:11443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{250 -3} {} 250m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:pdc-certs,ReadOnly:true,MountPath:/var/run/secrets/pdc-certs,SubPath:,MountPropagation:nil,SubPathExpr:,},VolumeMount{Name:kube-api-access-wcbmb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/ready,Port:{0 11443 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:40,TimeoutSeconds:1,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/bin/sleep 5],},HTTPGet:nil,TCPSocket:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Never,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod private-datasource-connect-564fb6cfbb-l8pgv_pdc(57e4a0cb-5e77-47bd-b277-70f4b1512c44): ErrImageNeverPull: Container image "us.gcr.io/hosted-grafana/pdc:0.1.415" is not present with pull policy of Never +E0507 11:59:28.889010 4597 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with ImagePullBackOff: \"Back-off pulling image \\\"us.gcr.io/hosted-grafana/hosted-grafana-pro:10.1.0-ephemeral-oss-77487-8287-1\\\"\"" pod="hosted-grafana/ephemeral1511182177487torkelo-grafana-79dd77959f-2l2kd" podUID="4d3be4e9-d8c5-487f-a292-ecb699c3aaad" +E0507 11:59:28.761691 3303 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cortex-gw\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=cortex-gw pod=cortex-gw-78bc9b5ccc-8hkmp_faro(44b54226-b4bd-46e0-a3f0-257cb44d9ea8)\"" pod="faro/cortex-gw-78bc9b5ccc-8hkmp" podUID="44b54226-b4bd-46e0-a3f0-257cb44d9ea8" +I0507 11:59:28.761235 3303 scope.go:117] "RemoveContainer" containerID="9f3955a57aa496cb888a35102ef0ee777d6a75cdc12addbdafc2d9b3fb9cc080" +E0507 11:59:28.744029 4601 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=benchloadtestingxxl2-grafana-5bb9765dd8-ktf2b_hosted-grafana(e8405a93-3a4c-4074-909d-661219c1f849)\"" pod="hosted-grafana/benchloadtestingxxl2-grafana-5bb9765dd8-ktf2b" podUID="e8405a93-3a4c-4074-909d-661219c1f849" +I0507 11:59:28.743482 4601 scope.go:117] "RemoveContainer" containerID="8dbc699386128aa4e4af25beb0ea7e7ecad1b2d5e829061a04ff808054f050aa" +I0507 11:59:28.728222 4586 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-8dwk" status="Running" +I0507 11:59:28.706031 4616 kubelet_getters.go:187] "Pod status updated" pod="kube-system/kube-proxy-gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-t4fv" status="Running" +I0507 11:59:28.671473 4601 kubelet.go:2404] "SyncLoop REMOVE" source="api" pods=["hosted-grafana/sloappverify-grafana-764f9644df-wzxz5"] +E0507 11:59:28.664457 4601 pod_workers.go:1300] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"grafana\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=grafana pod=ltest-grafana-58869c5dd7-xgm5l_hosted-grafana(4e4c9c69-de88-44dd-bd71-7b8ef56554b1)\"" 
pod="hosted-grafana/ltest-grafana-58869c5dd7-xgm5l" podUID="4e4c9c69-de88-44dd-bd71-7b8ef56554b1" +I0507 11:59:28.663840 4601 scope.go:117] "RemoveContainer" containerID="7dd19cfdca617fbbcacdd9cb716cf62666ab719dac31979615f13c0a7adc87a7" +I0507 11:59:28.663309 4601 kubelet.go:2498] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="hosted-grafana/ltest-grafana-58869c5dd7-xgm5l" +I0507 11:59:28.654287 4601 kubelet.go:2410] "SyncLoop DELETE" source="api" pods=["hosted-grafana/sloappverify-grafana-764f9644df-wzxz5"] +I0507 11:59:28.639440 4601 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"ea8c181d2a9baf4e2819046a0699151c11e7d761b3ccdf0b0beaa713ce50fe02"} err="failed to get container status \"ea8c181d2a9baf4e2819046a0699151c11e7d761b3ccdf0b0beaa713ce50fe02\": rpc error: code = NotFound desc = an error occurred when try to find container \"ea8c181d2a9baf4e2819046a0699151c11e7d761b3ccdf0b0beaa713ce50fe02\": not found" +E0507 11:59:28.639400 4601 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"ea8c181d2a9baf4e2819046a0699151c11e7d761b3ccdf0b0beaa713ce50fe02\": not found" containerID="ea8c181d2a9baf4e2819046a0699151c11e7d761b3ccdf0b0beaa713ce50fe02" +time="2024-05-07T11:59:28.639205289Z" level=error msg="ContainerStatus for \"ea8c181d2a9baf4e2819046a0699151c11e7d761b3ccdf0b0beaa713ce50fe02\" failed" error="rpc error: code = NotFound desc = an error occurred when try to find container \"ea8c181d2a9baf4e2819046a0699151c11e7d761b3ccdf0b0beaa713ce50fe02\": not found" +I0507 11:59:28.639007 4601 scope.go:117] "RemoveContainer" containerID="ea8c181d2a9baf4e2819046a0699151c11e7d761b3ccdf0b0beaa713ce50fe02" +I0507 11:59:28.638984 4601 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"c8a30401d2ac9c86fdf4db11df6731b750ed2d044efe5757037c04846c0d28c1"} err="failed to get container status \"c8a30401d2ac9c86fdf4db11df6731b750ed2d044efe5757037c04846c0d28c1\": rpc error: code = NotFound desc = an error occurred when try to find container \"c8a30401d2ac9c86fdf4db11df6731b750ed2d044efe5757037c04846c0d28c1\": not found" +E0507 11:59:28.638943 4601 remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"c8a30401d2ac9c86fdf4db11df6731b750ed2d044efe5757037c04846c0d28c1\": not found" containerID="c8a30401d2ac9c86fdf4db11df6731b750ed2d044efe5757037c04846c0d28c1" +time="2024-05-07T11:59:28.638744170Z" level=error msg="ContainerStatus for \"c8a30401d2ac9c86fdf4db11df6731b750ed2d044efe5757037c04846c0d28c1\" failed" error="rpc error: code = NotFound desc = an error occurred when try to find container \"c8a30401d2ac9c86fdf4db11df6731b750ed2d044efe5757037c04846c0d28c1\": not found" +I0507 11:59:28.638577 4601 scope.go:117] "RemoveContainer" containerID="c8a30401d2ac9c86fdf4db11df6731b750ed2d044efe5757037c04846c0d28c1" +I0507 11:59:28.638564 4601 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"containerd","ID":"c50338fdb9905376f6d1db35c61599f712f6d3a4b9604b6dc64bf62aea9b3b13"} err="failed to get container status \"c50338fdb9905376f6d1db35c61599f712f6d3a4b9604b6dc64bf62aea9b3b13\": rpc error: code = NotFound desc = an error occurred when try to find container \"c50338fdb9905376f6d1db35c61599f712f6d3a4b9604b6dc64bf62aea9b3b13\": not found" +E0507 11:59:28.638532 4601 
remote_runtime.go:432] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = an error occurred when try to find container \"c50338fdb9905376f6d1db35c61599f712f6d3a4b9604b6dc64bf62aea9b3b13\": not found" containerID="c50338fdb9905376f6d1db35c61599f712f6d3a4b9604b6dc64bf62aea9b3b13" \ No newline at end of file diff --git a/pkg/pattern/drain/testdata/kafka.txt b/pkg/pattern/drain/testdata/kafka.txt new file mode 100644 index 0000000000000..b8a2799890454 --- /dev/null +++ b/pkg/pattern/drain/testdata/kafka.txt @@ -0,0 +1,1000 @@ +[2024-05-07 10:55:53,434] INFO [ProducerStateManager partition=ingest-3] Wrote producer snapshot at offset 184233903 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:53,432] INFO [LocalLog partition=ingest-3, dir=/bitnami/kafka/data] Rolled new log segment at offset 184233903 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:53,251] INFO [UnifiedLog partition=cortex-dev-01-aggregations-offsets-0, dir=/bitnami/kafka/data] Incremented log start offset to 2142693 due to leader offset increment (kafka.log.UnifiedLog) +[2024-05-07 10:55:53,040] INFO Deleted producer state snapshot /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-1/00000000000000447969.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:53,040] INFO Deleted producer state snapshot /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-1/00000000000000447957.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:53,038] INFO Deleted time index /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-1/00000000000000447969.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:53,038] INFO Deleted offset index /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-1/00000000000000447969.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:53,038] INFO Deleted log /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-1/00000000000000447969.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:53,038] INFO Deleted time index /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-1/00000000000000447957.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:53,038] INFO Deleted offset index /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-1/00000000000000447957.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:53,038] INFO Deleted log /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-1/00000000000000447957.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:53,038] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-1, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=447957, size=948, lastModifiedTime=1715059232052, largestRecordTimestamp=Some(1715059232002)),LogSegment(baseOffset=447969, size=948, lastModifiedTime=1715059424352, largestRecordTimestamp=Some(1715059424301)) (kafka.log.LocalLog$) +[2024-05-07 10:55:53,034] INFO Deleted producer state snapshot /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-0/00000000000000448165.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:53,033] INFO Deleted time index /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-0/00000000000000448165.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:53,033] INFO Deleted offset index /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-0/00000000000000448165.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:53,033] INFO Deleted log /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-0/00000000000000448165.log.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:53,032] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=448165, size=948, lastModifiedTime=1715059402054, largestRecordTimestamp=Some(1715059402004)) (kafka.log.LocalLog$) +[2024-05-07 10:55:53,032] INFO Deleted producer state snapshot /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-0/00000000000000448153.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:53,030] INFO Deleted time index /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-0/00000000000000448153.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:53,030] INFO Deleted offset index /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-0/00000000000000448153.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:53,030] INFO Deleted log /bitnami/kafka/data/mimir-dev-09-aggregations-offsets-0/00000000000000448153.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:53,030] INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=448153, size=948, lastModifiedTime=1715059202091, largestRecordTimestamp=Some(1715059202040)) (kafka.log.LocalLog$) +[2024-05-07 10:55:52,850] INFO [ProducerStateManager partition=ingest-6] Wrote producer snapshot at offset 182088575 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:52,848] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Rolled new log segment at offset 182088575 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:52,846] INFO [ProducerStateManager partition=ingest-6] Wrote producer snapshot at offset 182088575 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:52,844] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Rolled new log segment at offset 182088575 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:52,480] INFO [ProducerStateManager partition=ingest-11] Wrote producer snapshot at offset 66190192 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:52,477] INFO [LocalLog partition=ingest-11, dir=/bitnami/kafka/data] Rolled new log segment at offset 66190192 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:52,367] INFO [ProducerStateManager partition=ingest-7] Wrote producer snapshot at offset 180832086 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:52,365] INFO [LocalLog partition=ingest-7, dir=/bitnami/kafka/data] Rolled new log segment at offset 180832086 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:52,308] INFO [ProducerStateManager partition=ingest-12] Wrote producer snapshot at offset 36659198 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:52,305] INFO [LocalLog partition=ingest-12, dir=/bitnami/kafka/data] Rolled new log segment at offset 36659198 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:52,303] INFO [ProducerStateManager partition=ingest-12] Wrote producer snapshot at offset 36659198 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:52,301] INFO [LocalLog partition=ingest-12, dir=/bitnami/kafka/data] Rolled new log segment at offset 36659198 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:52,192] INFO [ProducerStateManager partition=ingest-10] Wrote producer snapshot at offset 121932238 with 0 producer ids in 2 ms. 
(kafka.log.ProducerStateManager) +[2024-05-07 10:55:52,189] INFO [LocalLog partition=ingest-10, dir=/bitnami/kafka/data] Rolled new log segment at offset 121932238 in 1 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:52,186] INFO [ProducerStateManager partition=ingest-10] Wrote producer snapshot at offset 121932238 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:52,184] INFO [LocalLog partition=ingest-10, dir=/bitnami/kafka/data] Rolled new log segment at offset 121932238 in 1 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:52,053] INFO [UnifiedLog partition=mimir-dev-15-aggregations-offsets-3, dir=/bitnami/kafka/data] Incremented log start offset to 51170 due to segment deletion (kafka.log.UnifiedLog) +[2024-05-07 10:55:52,047] INFO [UnifiedLog partition=mimir-dev-15-aggregations-offsets-3, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=51158, size=948, lastModifiedTime=1715059560415, largestRecordTimestamp=Some(1715059560369)) due to retention size 102400 breach. Log size after deletion will be 102542. (kafka.log.UnifiedLog) +[2024-05-07 10:55:52,047] INFO [UnifiedLog partition=mimir-dev-15-aggregations-offsets-3, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=51146, size=948, lastModifiedTime=1715059380417, largestRecordTimestamp=Some(1715059380370)) due to retention size 102400 breach. Log size after deletion will be 103490. (kafka.log.UnifiedLog) +[2024-05-07 10:55:52,043] INFO [UnifiedLog partition=mimir-dev-15-aggregations-offsets-2, dir=/bitnami/kafka/data] Deleting segments due to log start offset 39847 breach: LogSegment(baseOffset=39823, size=948, lastModifiedTime=1715059620420, largestRecordTimestamp=Some(1715059620372)),LogSegment(baseOffset=39835, size=948, lastModifiedTime=1715059800918, largestRecordTimestamp=Some(1715059800870)) (kafka.log.UnifiedLog) +[2024-05-07 10:55:51,881] INFO [ProducerStateManager partition=ingest-5] Wrote producer snapshot at offset 183097322 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:51,878] INFO [LocalLog partition=ingest-5, dir=/bitnami/kafka/data] Rolled new log segment at offset 183097322 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:51,713] INFO [UnifiedLog partition=mimir-dev-15-aggregations-offsets-1, dir=/bitnami/kafka/data] Incremented log start offset to 45157 due to segment deletion (kafka.log.UnifiedLog) +[2024-05-07 10:55:51,709] INFO [UnifiedLog partition=mimir-dev-15-aggregations-offsets-1, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=45145, size=948, lastModifiedTime=1715059875915, largestRecordTimestamp=Some(1715059875870)) due to retention size 102400 breach. Log size after deletion will be 102542. (kafka.log.UnifiedLog) +[2024-05-07 10:55:51,709] INFO [UnifiedLog partition=mimir-dev-15-aggregations-offsets-1, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=45133, size=948, lastModifiedTime=1715059695414, largestRecordTimestamp=Some(1715059695369)) due to retention size 102400 breach. Log size after deletion will be 103490. 
(kafka.log.UnifiedLog) +[2024-05-07 10:55:51,703] INFO [UnifiedLog partition=mimir-dev-15-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segments due to log start offset 45366 breach: LogSegment(baseOffset=45342, size=948, lastModifiedTime=1715059155914, largestRecordTimestamp=Some(1715059155868)),LogSegment(baseOffset=45354, size=948, lastModifiedTime=1715059335414, largestRecordTimestamp=Some(1715059335369)) (kafka.log.UnifiedLog) +[2024-05-07 10:55:51,701] INFO [UnifiedLog partition=mimir-dev-15-aggregations-0, dir=/bitnami/kafka/data] Deleting segments due to log start offset 16287235806 breach: LogSegment(baseOffset=16269600515, size=1073716081, lastModifiedTime=1715057517177, largestRecordTimestamp=Some(1715057517123)) (kafka.log.UnifiedLog) +[2024-05-07 10:55:51,565] INFO [ProducerStateManager partition=ingest-1] Wrote producer snapshot at offset 183307543 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:51,562] INFO [LocalLog partition=ingest-1, dir=/bitnami/kafka/data] Rolled new log segment at offset 183307543 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:50,620] INFO [UnifiedLog partition=cortex-dev-01-aggregations-offsets-2, dir=/bitnami/kafka/data] Incremented log start offset to 2146379 due to leader offset increment (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,574] INFO [UnifiedLog partition=mimir-dev-11-aggregations-offsets-0, dir=/bitnami/kafka/data] Incremented log start offset to 1452503 due to segment deletion (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,573] INFO [UnifiedLog partition=mimir-dev-11-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=1452491, size=972, lastModifiedTime=1715060130758, largestRecordTimestamp=Some(1715060130709)) due to retention size 102400 breach. Log size after deletion will be 102627. (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,571] INFO [UnifiedLog partition=mimir-dev-11-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segments due to log start offset 1452491 breach: LogSegment(baseOffset=1452479, size=972, lastModifiedTime=1715059950760, largestRecordTimestamp=Some(1715059950710)) (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,567] INFO [UnifiedLog partition=mimir-dev-11-aggregations-0, dir=/bitnami/kafka/data] Incremented log start offset to 1821214489459 due to segment deletion (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,566] INFO [UnifiedLog partition=mimir-dev-11-aggregations-0, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=1821196355184, size=1073727044, lastModifiedTime=1715073016755, largestRecordTimestamp=Some(1715073016736)) due to retention size 38386270208 breach. Log size after deletion will be 39151186165. 
(kafka.log.UnifiedLog) +[2024-05-07 10:55:50,564] INFO [UnifiedLog partition=mimir-dev-11-aggregations-0, dir=/bitnami/kafka/data] Deleting segments due to log start offset 1821196355184 breach: LogSegment(baseOffset=1821178210534, size=1073738122, lastModifiedTime=1715072848434, largestRecordTimestamp=Some(1715072848414)) (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,560] INFO [UnifiedLog partition=mimir-dev-11-aggregations-offsets-1, dir=/bitnami/kafka/data] Incremented log start offset to 1430412 due to segment deletion (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,557] INFO [UnifiedLog partition=mimir-dev-11-aggregations-offsets-1, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=1430400, size=972, lastModifiedTime=1715060324856, largestRecordTimestamp=Some(1715060324807)) due to retention size 102400 breach. Log size after deletion will be 102708. (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,557] INFO [UnifiedLog partition=mimir-dev-11-aggregations-offsets-1, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=1430388, size=972, lastModifiedTime=1715060144852, largestRecordTimestamp=Some(1715060144801)) due to retention size 102400 breach. Log size after deletion will be 103680. (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,553] INFO [UnifiedLog partition=mimir-dev-11-aggregations-1, dir=/bitnami/kafka/data] Incremented log start offset to 1868088890732 due to segment deletion (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,548] INFO [UnifiedLog partition=mimir-dev-11-aggregations-1, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=1868070627390, size=1073735205, lastModifiedTime=1715073348725, largestRecordTimestamp=Some(1715073348676)) due to retention size 38386270208 breach. Log size after deletion will be 38481373648. (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,548] INFO [UnifiedLog partition=mimir-dev-11-aggregations-1, dir=/bitnami/kafka/data] Deleting segment LogSegment(baseOffset=1868052466387, size=1073738253, lastModifiedTime=1715073185854, largestRecordTimestamp=Some(1715073185833)) due to retention size 38386270208 breach. Log size after deletion will be 39555108853. (kafka.log.UnifiedLog) +[2024-05-07 10:55:50,518] INFO [ProducerStateManager partition=ingest-2] Wrote producer snapshot at offset 183573421 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:50,516] INFO [LocalLog partition=ingest-2, dir=/bitnami/kafka/data] Rolled new log segment at offset 183573421 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:50,503] INFO [ProducerStateManager partition=ingest-2] Wrote producer snapshot at offset 183573421 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:50,501] INFO [LocalLog partition=ingest-2, dir=/bitnami/kafka/data] Rolled new log segment at offset 183573421 in 1 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:49,501] INFO [ProducerStateManager partition=ingest-9] Wrote producer snapshot at offset 179378630 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:49,500] INFO [LocalLog partition=ingest-9, dir=/bitnami/kafka/data] Rolled new log segment at offset 179378630 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:48,697] INFO [ProducerStateManager partition=ingest-3] Wrote producer snapshot at offset 184231462 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:48,695] INFO [LocalLog partition=ingest-3, dir=/bitnami/kafka/data] Rolled new log segment at offset 184231462 in 0 ms. 
(kafka.log.LocalLog) +[2024-05-07 10:55:48,203] INFO [ProducerStateManager partition=ingest-6] Wrote producer snapshot at offset 182086125 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:48,201] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Rolled new log segment at offset 182086125 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:48,195] INFO [ProducerStateManager partition=ingest-6] Wrote producer snapshot at offset 182086125 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:48,193] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Rolled new log segment at offset 182086125 in 1 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:48,174] INFO [ProducerStateManager partition=ingest-10] Wrote producer snapshot at offset 121930059 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:48,172] INFO [LocalLog partition=ingest-10, dir=/bitnami/kafka/data] Rolled new log segment at offset 121930059 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:48,169] INFO [ProducerStateManager partition=ingest-10] Wrote producer snapshot at offset 121930059 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:48,166] INFO [LocalLog partition=ingest-10, dir=/bitnami/kafka/data] Rolled new log segment at offset 121930059 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:47,901] INFO [ProducerStateManager partition=ingest-11] Wrote producer snapshot at offset 66187757 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:47,898] INFO [LocalLog partition=ingest-11, dir=/bitnami/kafka/data] Rolled new log segment at offset 66187757 in 1 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:47,784] INFO [ProducerStateManager partition=ingest-7] Wrote producer snapshot at offset 180829650 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:47,781] INFO [LocalLog partition=ingest-7, dir=/bitnami/kafka/data] Rolled new log segment at offset 180829650 in 1 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:47,713] INFO [ProducerStateManager partition=ingest-12] Wrote producer snapshot at offset 36656758 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:47,711] INFO [LocalLog partition=ingest-12, dir=/bitnami/kafka/data] Rolled new log segment at offset 36656758 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:47,709] INFO [ProducerStateManager partition=ingest-12] Wrote producer snapshot at offset 36656758 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:47,707] INFO [LocalLog partition=ingest-12, dir=/bitnami/kafka/data] Rolled new log segment at offset 36656758 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:47,183] INFO [ProducerStateManager partition=ingest-5] Wrote producer snapshot at offset 183094851 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:47,181] INFO [LocalLog partition=ingest-5, dir=/bitnami/kafka/data] Rolled new log segment at offset 183094851 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:46,933] INFO [ProducerStateManager partition=ingest-1] Wrote producer snapshot at offset 183305134 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:46,930] INFO [LocalLog partition=ingest-1, dir=/bitnami/kafka/data] Rolled new log segment at offset 183305134 in 1 ms. 
(kafka.log.LocalLog) +[2024-05-07 10:55:46,282] INFO [ProducerStateManager partition=cortex-dev-01-aggregations-offsets-1] Wrote producer snapshot at offset 2142125 with 0 producer ids in 6 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:46,276] INFO [LocalLog partition=cortex-dev-01-aggregations-offsets-1, dir=/bitnami/kafka/data] Rolled new log segment at offset 2142125 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:45,932] INFO [ProducerStateManager partition=ingest-2] Wrote producer snapshot at offset 183571018 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:45,929] INFO [LocalLog partition=ingest-2, dir=/bitnami/kafka/data] Rolled new log segment at offset 183571018 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:45,927] INFO [ProducerStateManager partition=ingest-2] Wrote producer snapshot at offset 183571018 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:45,925] INFO [LocalLog partition=ingest-2, dir=/bitnami/kafka/data] Rolled new log segment at offset 183571018 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:45,526] INFO [ProducerStateManager partition=mimir-dev-14-aggregations-offsets-3] Wrote producer snapshot at offset 27664 with 0 producer ids in 43 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:45,484] INFO [LocalLog partition=mimir-dev-14-aggregations-offsets-3, dir=/bitnami/kafka/data] Rolled new log segment at offset 27664 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:44,916] INFO [ProducerStateManager partition=ingest-9] Wrote producer snapshot at offset 179376252 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:44,914] INFO [LocalLog partition=ingest-9, dir=/bitnami/kafka/data] Rolled new log segment at offset 179376252 in 1 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:44,212] INFO [ProducerStateManager partition=ingest-3] Wrote producer snapshot at offset 184229163 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:44,210] INFO [LocalLog partition=ingest-3, dir=/bitnami/kafka/data] Rolled new log segment at offset 184229163 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:43,486] INFO [ProducerStateManager partition=ingest-6] Wrote producer snapshot at offset 182083789 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:43,484] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Rolled new log segment at offset 182083789 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:43,482] INFO [ProducerStateManager partition=ingest-6] Wrote producer snapshot at offset 182083789 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:43,480] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Rolled new log segment at offset 182083789 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:43,456] INFO [ProducerStateManager partition=ingest-10] Wrote producer snapshot at offset 121927712 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:43,454] INFO [LocalLog partition=ingest-10, dir=/bitnami/kafka/data] Rolled new log segment at offset 121927712 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:43,452] INFO [ProducerStateManager partition=ingest-10] Wrote producer snapshot at offset 121927712 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:43,450] INFO [LocalLog partition=ingest-10, dir=/bitnami/kafka/data] Rolled new log segment at offset 121927712 in 1 ms. 
(kafka.log.LocalLog) +[2024-05-07 10:55:43,228] INFO [ProducerStateManager partition=ingest-11] Wrote producer snapshot at offset 66185454 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:43,225] INFO [LocalLog partition=ingest-11, dir=/bitnami/kafka/data] Rolled new log segment at offset 66185454 in 1 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:43,199] INFO [ProducerStateManager partition=ingest-7] Wrote producer snapshot at offset 180827370 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:43,197] INFO [LocalLog partition=ingest-7, dir=/bitnami/kafka/data] Rolled new log segment at offset 180827370 in 1 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:43,128] INFO [ProducerStateManager partition=ingest-12] Wrote producer snapshot at offset 36654469 with 0 producer ids in 4 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:43,124] INFO [LocalLog partition=ingest-12, dir=/bitnami/kafka/data] Rolled new log segment at offset 36654469 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:43,118] INFO [ProducerStateManager partition=ingest-12] Wrote producer snapshot at offset 36654469 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:43,116] INFO [LocalLog partition=ingest-12, dir=/bitnami/kafka/data] Rolled new log segment at offset 36654469 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:42,496] INFO [ProducerStateManager partition=ingest-5] Wrote producer snapshot at offset 183092542 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:42,494] INFO [LocalLog partition=ingest-5, dir=/bitnami/kafka/data] Rolled new log segment at offset 183092542 in 5 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:42,204] INFO [ProducerStateManager partition=ingest-1] Wrote producer snapshot at offset 183302806 with 0 producer ids in 5 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:42,199] INFO [LocalLog partition=ingest-1, dir=/bitnami/kafka/data] Rolled new log segment at offset 183302806 in 1 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:41,099] INFO [ProducerStateManager partition=ingest-2] Wrote producer snapshot at offset 183568687 with 0 producer ids in 3 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:41,097] INFO [LocalLog partition=ingest-2, dir=/bitnami/kafka/data] Rolled new log segment at offset 183568687 in 0 ms. (kafka.log.LocalLog) +[2024-05-07 10:55:41,091] INFO [ProducerStateManager partition=ingest-2] Wrote producer snapshot at offset 183568687 with 0 producer ids in 2 ms. (kafka.log.ProducerStateManager) +[2024-05-07 10:55:41,088] INFO [LocalLog partition=ingest-2, dir=/bitnami/kafka/data] Rolled new log segment at offset 183568687 in 0 ms. 
(kafka.log.LocalLog) +[2024-05-07 10:55:40,778] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182658341.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,778] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182655860.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,778] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182653526.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182651018.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182648504.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182646095.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182643712.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182641298.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182639001.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182636738.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182634546.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182632463.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182630290.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182627921.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182625520.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182623007.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182620446.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182618074.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182615634.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182613221.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182610888.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182608616.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot 
/bitnami/kafka/data/ingest-3/00000000000182606451.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,777] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182604245.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182602171.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182599811.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182597289.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182594863.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182592327.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182589965.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182587493.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182585006.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182582637.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182580437.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182578716.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182576828.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182574372.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182571815.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182569086.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182566740.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182564173.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182561708.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182559282.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182556814.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182554415.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,776] 
INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182552113.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,775] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182550001.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,775] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182547827.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,775] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182545622.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,775] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182543386.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,775] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182540856.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,775] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182538482.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,775] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182535953.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,775] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182533514.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,775] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182531056.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,775] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182528560.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,775] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-3/00000000000182526165.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,727] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182658341.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,727] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182658341.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,727] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182658341.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,727] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182655860.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,727] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182655860.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,727] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182655860.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,727] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182653526.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,727] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182653526.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,727] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182653526.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182651018.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182651018.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182651018.log.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182648504.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182648504.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182648504.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182646095.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182646095.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182646095.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182643712.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182643712.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182643712.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,726] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182641298.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182641298.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182641298.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182639001.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182639001.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182639001.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182636738.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182636738.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182636738.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182634546.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182634546.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182634546.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182632463.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182632463.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182632463.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182630290.timeindex.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,725] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182630290.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,724] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182630290.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,724] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182627921.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,724] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182627921.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,724] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182627921.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,724] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182625520.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,724] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182625520.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,724] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182625520.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,724] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182623007.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,724] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182623007.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,724] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182623007.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,724] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182620446.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182620446.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182620446.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182618074.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182618074.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182618074.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182615634.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182615634.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182615634.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182613221.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182613221.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182613221.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182610888.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182610888.index.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182610888.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,723] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182608616.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182608616.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182608616.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182606451.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182606451.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182606451.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182604245.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182604245.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182604245.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182602171.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182602171.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182602171.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182599811.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182599811.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,722] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182599811.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182597289.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182597289.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182597289.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182594863.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182594863.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182594863.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182592327.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182592327.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182592327.log.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182589965.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182589965.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,721] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182589965.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182587493.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182587493.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182587493.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182585006.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182585006.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182585006.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182582637.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182582637.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182582637.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182580437.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182580437.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,720] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182580437.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182578716.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182578716.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182578716.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182576828.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182576828.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182576828.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182574372.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182574372.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182574372.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182571815.timeindex.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182571815.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182571815.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182569086.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182569086.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,719] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182569086.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,718] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182566740.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,718] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182566740.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,718] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182566740.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,718] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182564173.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,718] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182564173.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,718] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182564173.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,718] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182561708.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,718] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182561708.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,718] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182561708.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,718] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182559282.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182559282.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182559282.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182556814.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182556814.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182556814.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182554415.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182554415.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182554415.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182552113.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182552113.index.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182552113.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,717] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182550001.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182550001.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182550001.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182547827.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182547827.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182547827.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182545622.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182545622.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182545622.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182543386.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182543386.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182543386.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,716] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182540856.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,715] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182540856.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,715] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182540856.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,715] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182538482.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,715] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182538482.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,715] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182538482.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,715] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182535953.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,715] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182535953.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,715] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182535953.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,715] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182533514.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,715] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182533514.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,715] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182533514.log.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,714] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182531056.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,714] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182531056.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,714] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182531056.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,714] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182528560.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,714] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182528560.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,714] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182528560.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,714] INFO Deleted time index /bitnami/kafka/data/ingest-3/00000000000182526165.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,714] INFO Deleted offset index /bitnami/kafka/data/ingest-3/00000000000182526165.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,713] INFO Deleted log /bitnami/kafka/data/ingest-3/00000000000182526165.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,713] INFO [LocalLog partition=ingest-3, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=182526165, size=16998661, lastModifiedTime=1715075758062, largestRecordTimestamp=Some(1715075758061)),LogSegment(baseOffset=182528560, size=16999718, lastModifiedTime=1715075763583, largestRecordTimestamp=Some(1715075763577)),LogSegment(baseOffset=182531056, size=16994792, lastModifiedTime=1715075768711, largestRecordTimestamp=Some(1715075768697)),LogSegment(baseOffset=182533514, size=16987578, lastModifiedTime=1715075773552, largestRecordTimestamp=Some(1715075773536)),LogSegment(baseOffset=182535953, size=16987705, lastModifiedTime=1715075779055, largestRecordTimestamp=Some(1715075779046)),LogSegment(baseOffset=182538482, size=16997466, lastModifiedTime=1715075784005, largestRecordTimestamp=Some(1715075784004)),LogSegment(baseOffset=182540856, size=16981250, lastModifiedTime=1715075789523, largestRecordTimestamp=Some(1715075789487)),LogSegment(baseOffset=182543386, size=16980484, lastModifiedTime=1715075794637, largestRecordTimestamp=Some(1715075794632)),LogSegment(baseOffset=182545622, size=16999738, lastModifiedTime=1715075799008, largestRecordTimestamp=Some(1715075799000)),LogSegment(baseOffset=182547827, size=16872695, lastModifiedTime=1715075803273, largestRecordTimestamp=Some(1715075803251)),LogSegment(baseOffset=182550001, size=16999890, lastModifiedTime=1715075808368, largestRecordTimestamp=Some(1715075808355)),LogSegment(baseOffset=182552113, size=16959982, lastModifiedTime=1715075813294, largestRecordTimestamp=Some(1715075813293)),LogSegment(baseOffset=182554415, size=16988073, lastModifiedTime=1715075817816, largestRecordTimestamp=Some(1715075817783)),LogSegment(baseOffset=182556814, size=16974731, lastModifiedTime=1715075823018, largestRecordTimestamp=Some(1715075823016)),LogSegment(baseOffset=182559282, size=16996090, lastModifiedTime=1715075828672, largestRecordTimestamp=Some(1715075828632)),LogSegment(baseOffset=182561708, size=16999327, lastModifiedTime=1715075833742, largestRecordTimestamp=Some(1715075833709)),LogSegment(baseOffset=182564173, size=16992947, lastModifiedTime=1715075839121, largestRecordTimestamp=Some(1715075839114)),LogSegment(baseOffset=182566740, size=16982572, 
lastModifiedTime=1715075844268, largestRecordTimestamp=Some(1715075844254)),LogSegment(baseOffset=182569086, size=16994786, lastModifiedTime=1715075850659, largestRecordTimestamp=Some(1715075850642)),LogSegment(baseOffset=182571815, size=16998391, lastModifiedTime=1715075856704, largestRecordTimestamp=Some(1715075856684)),LogSegment(baseOffset=182574372, size=16994403, lastModifiedTime=1715075861956, largestRecordTimestamp=Some(1715075861922)),LogSegment(baseOffset=182576828, size=16984546, lastModifiedTime=1715075865194, largestRecordTimestamp=Some(1715075865180)),LogSegment(baseOffset=182578716, size=16987846, lastModifiedTime=1715075868470, largestRecordTimestamp=Some(1715075868460)),LogSegment(baseOffset=182580437, size=16958237, lastModifiedTime=1715075873168, largestRecordTimestamp=Some(1715075873151)),LogSegment(baseOffset=182582637, size=16999432, lastModifiedTime=1715075877858, largestRecordTimestamp=Some(1715075877850)),LogSegment(baseOffset=182585006, size=16938567, lastModifiedTime=1715075882952, largestRecordTimestamp=Some(1715075882938)),LogSegment(baseOffset=182587493, size=16998214, lastModifiedTime=1715075888306, largestRecordTimestamp=Some(1715075888285)),LogSegment(baseOffset=182589965, size=16996264, lastModifiedTime=1715075893370, largestRecordTimestamp=Some(1715075893365)),LogSegment(baseOffset=182592327, size=16991650, lastModifiedTime=1715075898806, largestRecordTimestamp=Some(1715075898802)),LogSegment(baseOffset=182594863, size=16998234, lastModifiedTime=1715075903737, largestRecordTimestamp=Some(1715075903733)),LogSegment(baseOffset=182597289, size=16996241, lastModifiedTime=1715075908805, largestRecordTimestamp=Some(1715075908797)),LogSegment(baseOffset=182599811, size=16993657, lastModifiedTime=1715075913918, largestRecordTimestamp=Some(1715075913915)),LogSegment(baseOffset=182602171, size=16993112, lastModifiedTime=1715075918570, largestRecordTimestamp=Some(1715075918570)),LogSegment(baseOffset=182604245, size=16959963, lastModifiedTime=1715075922720, largestRecordTimestamp=Some(1715075922714)),LogSegment(baseOffset=182606451, size=16998518, lastModifiedTime=1715075927490, largestRecordTimestamp=Some(1715075927484)),LogSegment(baseOffset=182608616, size=16999103, lastModifiedTime=1715075932207, largestRecordTimestamp=Some(1715075932188)),LogSegment(baseOffset=182610888, size=16999389, lastModifiedTime=1715075937118, largestRecordTimestamp=Some(1715075937103)),LogSegment(baseOffset=182613221, size=16982597, lastModifiedTime=1715075942170, largestRecordTimestamp=Some(1715075942153)),LogSegment(baseOffset=182615634, size=16986904, lastModifiedTime=1715075947544, largestRecordTimestamp=Some(1715075947541)),LogSegment(baseOffset=182618074, size=16998820, lastModifiedTime=1715075952370, largestRecordTimestamp=Some(1715075952351)),LogSegment(baseOffset=182620446, size=16985066, lastModifiedTime=1715075957884, largestRecordTimestamp=Some(1715075957865)),LogSegment(baseOffset=182623007, size=16998235, lastModifiedTime=1715075963030, largestRecordTimestamp=Some(1715075963008)),LogSegment(baseOffset=182625520, size=16987568, lastModifiedTime=1715075967944, largestRecordTimestamp=Some(1715075967934)),LogSegment(baseOffset=182627921, size=16997118, lastModifiedTime=1715075973216, largestRecordTimestamp=Some(1715075973204)),LogSegment(baseOffset=182630290, size=16978465, lastModifiedTime=1715075978064, largestRecordTimestamp=Some(1715075978053)),LogSegment(baseOffset=182632463, size=16901644, lastModifiedTime=1715075982228, 
largestRecordTimestamp=Some(1715075982211)),LogSegment(baseOffset=182634546, size=16992477, lastModifiedTime=1715075986935, largestRecordTimestamp=Some(1715075986914)),LogSegment(baseOffset=182636738, size=16951087, lastModifiedTime=1715075991658, largestRecordTimestamp=Some(1715075991636)),LogSegment(baseOffset=182639001, size=16994471, lastModifiedTime=1715075996281, largestRecordTimestamp=Some(1715075996266)),LogSegment(baseOffset=182641298, size=16995754, lastModifiedTime=1715076001319, largestRecordTimestamp=Some(1715076001269)),LogSegment(baseOffset=182643712, size=16992752, lastModifiedTime=1715076006604, largestRecordTimestamp=Some(1715076006583)),LogSegment(baseOffset=182646095, size=16992944, lastModifiedTime=1715076011511, largestRecordTimestamp=Some(1715076011470)),LogSegment(baseOffset=182648504, size=16998993, lastModifiedTime=1715076016908, largestRecordTimestamp=Some(1715076016908)),LogSegment(baseOffset=182651018, size=16996765, lastModifiedTime=1715076021971, largestRecordTimestamp=Some(1715076021968)),LogSegment(baseOffset=182653526, size=16995808, lastModifiedTime=1715076026767, largestRecordTimestamp=Some(1715076026752)),LogSegment(baseOffset=182655860, size=16993535, lastModifiedTime=1715076032181, largestRecordTimestamp=Some(1715076032131)),LogSegment(baseOffset=182658341, size=16971926, lastModifiedTime=1715076037067, largestRecordTimestamp=Some(1715076037053)) (kafka.log.LocalLog$) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180520112.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180517815.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180515281.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180512848.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180510439.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180508022.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180505674.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180503431.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180501183.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180499079.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180496930.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,708] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180494832.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180492304.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot 
/bitnami/kafka/data/ingest-6/00000000000180489919.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180487377.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180484967.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180482560.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180480095.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180477735.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180475486.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180473259.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180471046.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180468968.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180466821.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180464299.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180461885.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180459366.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180456986.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180454546.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180452079.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180449601.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180447366.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180445367.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,707] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180443778.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180441466.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180438984.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] 
INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180436204.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180433867.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180431327.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180428944.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180426459.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180424008.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180421560.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180419267.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180417063.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180414883.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180412733.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180410608.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180408118.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,706] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180405723.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,705] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180403261.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,705] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180400817.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,651] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180520112.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,651] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180520112.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,651] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180520112.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,651] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180517815.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,651] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180517815.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,651] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180517815.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180515281.timeindex.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180515281.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180515281.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180512848.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180512848.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180512848.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180510439.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180510439.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180510439.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180508022.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180508022.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180508022.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180505674.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,650] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180505674.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180505674.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180503431.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180503431.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180503431.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180501183.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180501183.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180501183.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180499079.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180499079.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180499079.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180496930.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180496930.index.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180496930.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180494832.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,649] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180494832.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180494832.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180492304.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180492304.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180492304.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180489919.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180489919.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180489919.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180487377.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180487377.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180487377.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180484967.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180484967.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,648] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180484967.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180482560.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180482560.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180482560.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180480095.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180480095.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180480095.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180477735.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180477735.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180477735.log.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180475486.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180475486.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180475486.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180473259.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180473259.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180473259.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180471046.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,647] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180471046.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180471046.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180468968.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180468968.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180468968.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180466821.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180466821.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180466821.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180464299.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180464299.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180464299.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180461885.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180461885.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180461885.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180459366.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,646] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180459366.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180459366.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180456986.timeindex.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180456986.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180456986.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180454546.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180454546.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180454546.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180452079.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180452079.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180452079.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180449601.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180449601.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180449601.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,645] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180447366.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180447366.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180447366.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180445367.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180445367.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180445367.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180443778.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180443778.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180443778.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180441466.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180441466.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180441466.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180438984.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,644] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180438984.index.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180438984.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180436204.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180436204.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180436204.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180433867.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180433867.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180433867.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180431327.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180431327.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180431327.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180428944.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180428944.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,643] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180428944.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180426459.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180426459.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180426459.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180424008.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180424008.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180424008.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180421560.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180421560.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180421560.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180419267.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180419267.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180419267.log.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180417063.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180417063.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,642] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180417063.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180414883.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180414883.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180414883.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180412733.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180412733.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180412733.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180410608.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180410608.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180410608.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180408118.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180408118.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180408118.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,641] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180405723.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,640] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180405723.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,640] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180405723.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,640] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180403261.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,640] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180403261.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,640] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180403261.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,640] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180400817.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,640] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180400817.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,640] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180400817.log.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,638] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180400817, size=16997594, lastModifiedTime=1715075775780, largestRecordTimestamp=Some(1715075775771)),LogSegment(baseOffset=180403261, size=16992344, lastModifiedTime=1715075781053, largestRecordTimestamp=Some(1715075781021)),LogSegment(baseOffset=180405723, size=16989895, lastModifiedTime=1715075786205, largestRecordTimestamp=Some(1715075786174)),LogSegment(baseOffset=180408118, size=16998698, lastModifiedTime=1715075791681, largestRecordTimestamp=Some(1715075791673)),LogSegment(baseOffset=180410608, size=16995676, lastModifiedTime=1715075796438, largestRecordTimestamp=Some(1715075796430)),LogSegment(baseOffset=180412733, size=16963278, lastModifiedTime=1715075800534, largestRecordTimestamp=Some(1715075800511)),LogSegment(baseOffset=180414883, size=16984328, lastModifiedTime=1715075805272, largestRecordTimestamp=Some(1715075805230)),LogSegment(baseOffset=180417063, size=16989109, lastModifiedTime=1715075810381, largestRecordTimestamp=Some(1715075810372)),LogSegment(baseOffset=180419267, size=16996871, lastModifiedTime=1715075815153, largestRecordTimestamp=Some(1715075815125)),LogSegment(baseOffset=180421560, size=16988558, lastModifiedTime=1715075819785, largestRecordTimestamp=Some(1715075819763)),LogSegment(baseOffset=180424008, size=16999292, lastModifiedTime=1715075825336, largestRecordTimestamp=Some(1715075825303)),LogSegment(baseOffset=180426459, size=16990595, lastModifiedTime=1715075830839, largestRecordTimestamp=Some(1715075830827)),LogSegment(baseOffset=180428944, size=16995859, lastModifiedTime=1715075835942, largestRecordTimestamp=Some(1715075835904)),LogSegment(baseOffset=180431327, size=16992294, lastModifiedTime=1715075841219, largestRecordTimestamp=Some(1715075841214)),LogSegment(baseOffset=180433867, size=16966736, lastModifiedTime=1715075846443, largestRecordTimestamp=Some(1715075846401)),LogSegment(baseOffset=180436204, size=16894731, lastModifiedTime=1715075853273, largestRecordTimestamp=Some(1715075853244)),LogSegment(baseOffset=180438984, size=16983529, lastModifiedTime=1715075858911, largestRecordTimestamp=Some(1715075858891)),LogSegment(baseOffset=180441466, size=16996933, lastModifiedTime=1715075863566, largestRecordTimestamp=Some(1715075863554)),LogSegment(baseOffset=180443778, size=16999841, lastModifiedTime=1715075866199, largestRecordTimestamp=Some(1715075866185)),LogSegment(baseOffset=180445367, size=16992471, lastModifiedTime=1715075870385, largestRecordTimestamp=Some(1715075870347)),LogSegment(baseOffset=180447366, size=16999996, lastModifiedTime=1715075875102, largestRecordTimestamp=Some(1715075875091)),LogSegment(baseOffset=180449601, size=16994426, lastModifiedTime=1715075879927, largestRecordTimestamp=Some(1715075879926)),LogSegment(baseOffset=180452079, size=16998020, lastModifiedTime=1715075885293, largestRecordTimestamp=Some(1715075885263)),LogSegment(baseOffset=180454546, size=16992231, lastModifiedTime=1715075890424, largestRecordTimestamp=Some(1715075890409)),LogSegment(baseOffset=180456986, size=16970315, lastModifiedTime=1715075895719, largestRecordTimestamp=Some(1715075895690)),LogSegment(baseOffset=180459366, size=16990785, lastModifiedTime=1715075900996, largestRecordTimestamp=Some(1715075900985)),LogSegment(baseOffset=180461885, size=16996655, lastModifiedTime=1715075905847, largestRecordTimestamp=Some(1715075905841)),LogSegment(baseOffset=180464299, size=16982181, 
lastModifiedTime=1715075911052, largestRecordTimestamp=Some(1715075911028)),LogSegment(baseOffset=180466821, size=16997630, lastModifiedTime=1715075915962, largestRecordTimestamp=Some(1715075915953)),LogSegment(baseOffset=180468968, size=16995723, lastModifiedTime=1715075920325, largestRecordTimestamp=Some(1715075920308)),LogSegment(baseOffset=180471046, size=16979316, lastModifiedTime=1715075924724, largestRecordTimestamp=Some(1715075924697)),LogSegment(baseOffset=180473259, size=16995238, lastModifiedTime=1715075929645, largestRecordTimestamp=Some(1715075929624)),LogSegment(baseOffset=180475486, size=16988461, lastModifiedTime=1715075934288, largestRecordTimestamp=Some(1715075934283)),LogSegment(baseOffset=180477735, size=16993767, lastModifiedTime=1715075939277, largestRecordTimestamp=Some(1715075939270)),LogSegment(baseOffset=180480095, size=16995409, lastModifiedTime=1715075944639, largestRecordTimestamp=Some(1715075944635)),LogSegment(baseOffset=180482560, size=16992784, lastModifiedTime=1715075949760, largestRecordTimestamp=Some(1715075949760)),LogSegment(baseOffset=180484967, size=16990838, lastModifiedTime=1715075954937, largestRecordTimestamp=Some(1715075954929)),LogSegment(baseOffset=180487377, size=16976794, lastModifiedTime=1715075960151, largestRecordTimestamp=Some(1715075960119)),LogSegment(baseOffset=180489919, size=16997379, lastModifiedTime=1715075965116, largestRecordTimestamp=Some(1715075965085)),LogSegment(baseOffset=180492304, size=16956613, lastModifiedTime=1715075970448, largestRecordTimestamp=Some(1715075970424)),LogSegment(baseOffset=180494832, size=16895640, lastModifiedTime=1715075975354, largestRecordTimestamp=Some(1715075975341)),LogSegment(baseOffset=180496930, size=16998328, lastModifiedTime=1715075979813, largestRecordTimestamp=Some(1715075979796)),LogSegment(baseOffset=180499079, size=16995699, lastModifiedTime=1715075984309, largestRecordTimestamp=Some(1715075984285)),LogSegment(baseOffset=180501183, size=16993785, lastModifiedTime=1715075989086, largestRecordTimestamp=Some(1715075989064)),LogSegment(baseOffset=180503431, size=16989600, lastModifiedTime=1715075993713, largestRecordTimestamp=Some(1715075993683)),LogSegment(baseOffset=180505674, size=16984790, lastModifiedTime=1715075998337, largestRecordTimestamp=Some(1715075998318)),LogSegment(baseOffset=180508022, size=16982630, lastModifiedTime=1715076003671, largestRecordTimestamp=Some(1715076003660)),LogSegment(baseOffset=180510439, size=16999488, lastModifiedTime=1715076009000, largestRecordTimestamp=Some(1715076008996)),LogSegment(baseOffset=180512848, size=16997845, lastModifiedTime=1715076014033, largestRecordTimestamp=Some(1715076014032)),LogSegment(baseOffset=180515281, size=16990661, lastModifiedTime=1715076019245, largestRecordTimestamp=Some(1715076019216)),LogSegment(baseOffset=180517815, size=16996244, lastModifiedTime=1715076023989, largestRecordTimestamp=Some(1715076023963)),LogSegment(baseOffset=180520112, size=16992012, lastModifiedTime=1715076029243, largestRecordTimestamp=Some(1715076029231)) (kafka.log.LocalLog$) +[2024-05-07 10:55:40,632] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180398373.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,632] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180395889.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,632] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180393429.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 
10:55:40,632] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-6/00000000000180391157.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,627] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180398373.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,627] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180398373.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,627] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180398373.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,627] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180395889.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,627] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180395889.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,627] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180395889.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,627] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180393429.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,627] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180393429.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,627] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180393429.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,627] INFO Deleted time index /bitnami/kafka/data/ingest-6/00000000000180391157.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,626] INFO Deleted offset index /bitnami/kafka/data/ingest-6/00000000000180391157.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,626] INFO Deleted log /bitnami/kafka/data/ingest-6/00000000000180391157.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,626] INFO [LocalLog partition=ingest-6, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=180391157, size=16991045, lastModifiedTime=1715075754780, largestRecordTimestamp=Some(1715075754774)),LogSegment(baseOffset=180393429, size=16997692, lastModifiedTime=1715075760206, largestRecordTimestamp=Some(1715075760186)),LogSegment(baseOffset=180395889, size=16998200, lastModifiedTime=1715075765542, largestRecordTimestamp=Some(1715075765526)),LogSegment(baseOffset=180398373, size=16977347, lastModifiedTime=1715075770515, largestRecordTimestamp=Some(1715075770504)) (kafka.log.LocalLog$) +[2024-05-07 10:55:40,626] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179265040.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,626] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179262715.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,626] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179260226.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,626] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179257861.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179255312.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179252915.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179250530.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO 
Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179248096.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179245756.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179243472.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179241334.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179239147.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179237038.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179234885.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179232368.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179229948.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179227402.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179224988.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179222600.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179220122.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179217793.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179215514.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179213268.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179211133.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,625] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179208986.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179206836.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179204346.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179201897.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179199373.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179197009.snapshot.deleted 
(kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179194546.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179192076.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179189664.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179187398.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179185434.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179183786.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179181478.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179179037.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179176191.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179173853.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179171302.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179168915.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179166414.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179163962.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179161550.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,624] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179159230.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,623] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179157056.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,623] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179154861.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,623] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179152727.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,623] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179150568.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,623] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179148084.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,623] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179145674.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,623] INFO Deleted producer state snapshot 
/bitnami/kafka/data/ingest-7/00000000000179143198.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,623] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179140761.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,623] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179138321.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,623] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179135832.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,623] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-7/00000000000179133378.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,572] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179265040.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,572] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179265040.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,572] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179265040.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,572] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179262715.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,572] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179262715.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,572] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179262715.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,572] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179260226.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,572] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179260226.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,572] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179260226.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,572] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179257861.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,572] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179257861.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,572] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179257861.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179255312.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179255312.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179255312.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179252915.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179252915.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179252915.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179250530.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179250530.index.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179250530.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179248096.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179248096.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179248096.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179245756.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179245756.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179245756.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,571] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179243472.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179243472.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179243472.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179241334.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179241334.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179241334.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179239147.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179239147.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179239147.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179237038.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179237038.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179237038.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179234885.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179234885.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179234885.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,570] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179232368.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179232368.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179232368.log.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179229948.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179229948.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179229948.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179227402.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179227402.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179227402.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179224988.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179224988.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179224988.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,569] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179222600.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179222600.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179222600.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179220122.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179220122.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179220122.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179217793.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179217793.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179217793.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179215514.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179215514.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179215514.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179213268.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179213268.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,568] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179213268.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179211133.timeindex.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179211133.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179211133.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179208986.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179208986.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179208986.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179206836.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179206836.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179206836.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179204346.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179204346.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179204346.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179201897.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179201897.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179201897.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179199373.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179199373.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179199373.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179197009.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179197009.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,567] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179197009.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179194546.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179194546.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179194546.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179192076.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179192076.index.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179192076.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179189664.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179189664.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179189664.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179187398.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179187398.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179187398.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179185434.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179185434.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179185434.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179183786.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179183786.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,566] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179183786.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,565] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179181478.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,565] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179181478.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,565] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179181478.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,565] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179179037.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,565] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179179037.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,565] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179179037.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,565] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179176191.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,565] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179176191.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,565] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179176191.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,565] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179173853.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179173853.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179173853.log.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179171302.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179171302.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179171302.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179168915.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179168915.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179168915.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179166414.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179166414.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179166414.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179163962.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179163962.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,564] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179163962.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179161550.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179161550.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179161550.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179159230.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179159230.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179159230.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179157056.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179157056.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179157056.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179154861.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179154861.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179154861.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,563] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179152727.timeindex.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179152727.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179152727.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179150568.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179150568.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179150568.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179148084.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179148084.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179148084.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179145674.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179145674.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179145674.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179143198.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179143198.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,562] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179143198.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179140761.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179140761.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179140761.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179138321.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179138321.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179138321.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179135832.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179135832.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179135832.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted time index /bitnami/kafka/data/ingest-7/00000000000179133378.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted offset index /bitnami/kafka/data/ingest-7/00000000000179133378.index.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,561] INFO Deleted log /bitnami/kafka/data/ingest-7/00000000000179133378.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,559] INFO [LocalLog partition=ingest-7, dir=/bitnami/kafka/data] Deleting segment files LogSegment(baseOffset=179133378, size=16987985, lastModifiedTime=1715075760072, largestRecordTimestamp=Some(1715075760047)),LogSegment(baseOffset=179135832, size=16999459, lastModifiedTime=1715075765431, largestRecordTimestamp=Some(1715075765398)),LogSegment(baseOffset=179138321, size=16994485, lastModifiedTime=1715075770425, largestRecordTimestamp=Some(1715075770404)),LogSegment(baseOffset=179140761, size=16996810, lastModifiedTime=1715075775622, largestRecordTimestamp=Some(1715075775619)),LogSegment(baseOffset=179143198, size=16998520, lastModifiedTime=1715075780912, largestRecordTimestamp=Some(1715075780889)),LogSegment(baseOffset=179145674, size=16988474, lastModifiedTime=1715075786051, largestRecordTimestamp=Some(1715075786030)),LogSegment(baseOffset=179148084, size=16956099, lastModifiedTime=1715075791514, largestRecordTimestamp=Some(1715075791486)),LogSegment(baseOffset=179150568, size=16995476, lastModifiedTime=1715075796360, largestRecordTimestamp=Some(1715075796329)),LogSegment(baseOffset=179152727, size=16993313, lastModifiedTime=1715075800440, largestRecordTimestamp=Some(1715075800430)),LogSegment(baseOffset=179154861, size=16992142, lastModifiedTime=1715075805147, largestRecordTimestamp=Some(1715075805135)),LogSegment(baseOffset=179157056, size=16999919, lastModifiedTime=1715075810155, largestRecordTimestamp=Some(1715075810153)),LogSegment(baseOffset=179159230, size=16995021, lastModifiedTime=1715075815018, largestRecordTimestamp=Some(1715075815016)),LogSegment(baseOffset=179161550, size=16966526, lastModifiedTime=1715075819528, largestRecordTimestamp=Some(1715075819521)),LogSegment(baseOffset=179163962, size=16990848, lastModifiedTime=1715075825066, largestRecordTimestamp=Some(1715075825042)),LogSegment(baseOffset=179166414, size=16997833, lastModifiedTime=1715075830662, largestRecordTimestamp=Some(1715075830656)),LogSegment(baseOffset=179168915, size=16992619, lastModifiedTime=1715075835771, largestRecordTimestamp=Some(1715075835741)),LogSegment(baseOffset=179171302, size=16999091, lastModifiedTime=1715075841031, largestRecordTimestamp=Some(1715075841022)),LogSegment(baseOffset=179173853, size=16993953, lastModifiedTime=1715075846197, largestRecordTimestamp=Some(1715075846181)),LogSegment(baseOffset=179176191, size=16997479, lastModifiedTime=1715075853192, largestRecordTimestamp=Some(1715075853172)),LogSegment(baseOffset=179179037, size=16997174, lastModifiedTime=1715075858693, largestRecordTimestamp=Some(1715075858682)),LogSegment(baseOffset=179181478, size=16986004, lastModifiedTime=1715075863400, largestRecordTimestamp=Some(1715075863396)),LogSegment(baseOffset=179183786, size=16995316, lastModifiedTime=1715075866123, largestRecordTimestamp=Some(1715075866112)),LogSegment(baseOffset=179185434, size=16990492, lastModifiedTime=1715075870154, largestRecordTimestamp=Some(1715075870146)),LogSegment(baseOffset=179187398, size=16999541, lastModifiedTime=1715075874980, largestRecordTimestamp=Some(1715075874961)),LogSegment(baseOffset=179189664, size=16987383, lastModifiedTime=1715075879670, largestRecordTimestamp=Some(1715075879639)),LogSegment(baseOffset=179192076, size=16991701, lastModifiedTime=1715075885010, largestRecordTimestamp=Some(1715075884995)),LogSegment(baseOffset=179194546, size=16989109, 
lastModifiedTime=1715075890220, largestRecordTimestamp=Some(1715075890208)),LogSegment(baseOffset=179197009, size=16962782, lastModifiedTime=1715075895466, largestRecordTimestamp=Some(1715075895456)),LogSegment(baseOffset=179199373, size=16974715, lastModifiedTime=1715075900757, largestRecordTimestamp=Some(1715075900746)),LogSegment(baseOffset=179201897, size=16993973, lastModifiedTime=1715075905639, largestRecordTimestamp=Some(1715075905638)),LogSegment(baseOffset=179204346, size=16979828, lastModifiedTime=1715075910798, largestRecordTimestamp=Some(1715075910782)),LogSegment(baseOffset=179206836, size=16992092, lastModifiedTime=1715075915638, largestRecordTimestamp=Some(1715075915632)),LogSegment(baseOffset=179208986, size=16988849, lastModifiedTime=1715075920193, largestRecordTimestamp=Some(1715075920176)),LogSegment(baseOffset=179211133, size=16989206, lastModifiedTime=1715075924352, largestRecordTimestamp=Some(1715075924338)),LogSegment(baseOffset=179213268, size=16989737, lastModifiedTime=1715075929343, largestRecordTimestamp=Some(1715075929332)),LogSegment(baseOffset=179215514, size=16997903, lastModifiedTime=1715075934074, largestRecordTimestamp=Some(1715075934056)),LogSegment(baseOffset=179217793, size=16995100, lastModifiedTime=1715075938937, largestRecordTimestamp=Some(1715075938925)),LogSegment(baseOffset=179220122, size=16981574, lastModifiedTime=1715075944296, largestRecordTimestamp=Some(1715075944288)),LogSegment(baseOffset=179222600, size=16999794, lastModifiedTime=1715075949454, largestRecordTimestamp=Some(1715075949432)),LogSegment(baseOffset=179224988, size=16998870, lastModifiedTime=1715075954567, largestRecordTimestamp=Some(1715075954544)),LogSegment(baseOffset=179227402, size=16986053, lastModifiedTime=1715075959815, largestRecordTimestamp=Some(1715075959813)),LogSegment(baseOffset=179229948, size=16999937, lastModifiedTime=1715075964787, largestRecordTimestamp=Some(1715075964779)),LogSegment(baseOffset=179232368, size=16992995, lastModifiedTime=1715075970109, largestRecordTimestamp=Some(1715075970096)),LogSegment(baseOffset=179234885, size=16995271, lastModifiedTime=1715075975078, largestRecordTimestamp=Some(1715075975066)),LogSegment(baseOffset=179237038, size=16987833, lastModifiedTime=1715075979534, largestRecordTimestamp=Some(1715075979499)),LogSegment(baseOffset=179239147, size=16844618, lastModifiedTime=1715075984150, largestRecordTimestamp=Some(1715075984139)),LogSegment(baseOffset=179241334, size=16968482, lastModifiedTime=1715075988727, largestRecordTimestamp=Some(1715075988700)),LogSegment(baseOffset=179243472, size=16991395, lastModifiedTime=1715075993359, largestRecordTimestamp=Some(1715075993333)),LogSegment(baseOffset=179245756, size=16985926, lastModifiedTime=1715075998010, largestRecordTimestamp=Some(1715075998005)),LogSegment(baseOffset=179248096, size=16948574, lastModifiedTime=1715076003328, largestRecordTimestamp=Some(1715076003298)),LogSegment(baseOffset=179250530, size=16986047, lastModifiedTime=1715076008650, largestRecordTimestamp=Some(1715076008628)),LogSegment(baseOffset=179252915, size=16998875, lastModifiedTime=1715076013551, largestRecordTimestamp=Some(1715076013516)),LogSegment(baseOffset=179255312, size=16997990, lastModifiedTime=1715076018832, largestRecordTimestamp=Some(1715076018797)),LogSegment(baseOffset=179257861, size=16999525, lastModifiedTime=1715076023621, largestRecordTimestamp=Some(1715076023601)),LogSegment(baseOffset=179260226, size=16997755, lastModifiedTime=1715076028814, 
largestRecordTimestamp=Some(1715076028800)),LogSegment(baseOffset=179262715, size=16981492, lastModifiedTime=1715076034150, largestRecordTimestamp=Some(1715076034140)),LogSegment(baseOffset=179265040, size=16998332, lastModifiedTime=1715076038676, largestRecordTimestamp=Some(1715076038657)) (kafka.log.LocalLog$) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000182001921.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181999412.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181997117.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181994584.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181992094.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181989668.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181987270.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181984895.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181982607.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181980350.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181978204.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181976053.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181973936.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181971572.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,551] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181969211.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181966656.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181964073.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181961729.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181959280.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181956904.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot 
/bitnami/kafka/data/ingest-2/00000000000181954523.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181952236.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181950054.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181947907.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181945800.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181943454.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181941038.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181938558.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181935954.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181933616.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181931134.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181928665.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181926263.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181924068.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181922378.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181920396.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181917993.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181915440.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181912768.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181910396.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181907887.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181905474.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181903009.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] 
INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181900581.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,550] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181898115.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,549] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181895813.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,549] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181893700.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,549] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181891506.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,549] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181889342.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,549] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181887090.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,549] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181884612.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,549] INFO Deleted producer state snapshot /bitnami/kafka/data/ingest-2/00000000000181882216.snapshot.deleted (kafka.log.SnapshotFile) +[2024-05-07 10:55:40,494] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000182001921.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000182001921.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000182001921.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181999412.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181999412.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181999412.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181997117.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181997117.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181997117.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181994584.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181994584.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181994584.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,494] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181992094.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,493] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181992094.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,493] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181992094.log.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,493] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181989668.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,493] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181989668.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,493] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181989668.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,493] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181987270.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,493] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181987270.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,493] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181987270.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181984895.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181984895.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181984895.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181982607.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181982607.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181982607.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181980350.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181980350.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181980350.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181978204.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181978204.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181978204.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,492] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181976053.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181976053.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181976053.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181973936.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181973936.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181973936.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181971572.timeindex.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181971572.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181971572.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181969211.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181969211.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181969211.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181966656.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181966656.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181966656.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181964073.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181964073.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181964073.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181961729.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181961729.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181961729.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,491] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181959280.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181959280.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181959280.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181956904.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181956904.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181956904.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181954523.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181954523.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181954523.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181952236.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181952236.index.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181952236.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181950054.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181950054.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181950054.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181947907.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181947907.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181947907.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181945800.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181945800.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,490] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181945800.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181943454.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181943454.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181943454.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181941038.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181941038.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181941038.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181938558.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181938558.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181938558.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181935954.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181935954.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181935954.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181933616.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181933616.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,489] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181933616.log.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181931134.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181931134.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181931134.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181928665.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181928665.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181928665.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181926263.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181926263.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181926263.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181924068.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181924068.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181924068.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181922378.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181922378.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,488] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181922378.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181920396.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181920396.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181920396.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181917993.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181917993.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181917993.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181915440.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181915440.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181915440.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181912768.timeindex.deleted. 
(kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181912768.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181912768.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181910396.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181910396.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,487] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181910396.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181907887.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181907887.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181907887.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181905474.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181905474.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181905474.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181903009.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181903009.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181903009.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181900581.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181900581.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181900581.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181898115.timeindex.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,486] INFO Deleted offset index /bitnami/kafka/data/ingest-2/00000000000181898115.index.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,485] INFO Deleted log /bitnami/kafka/data/ingest-2/00000000000181898115.log.deleted. (kafka.log.LogSegment) +[2024-05-07 10:55:40,485] INFO Deleted time index /bitnami/kafka/data/ingest-2/00000000000181895813.timeindex.deleted. 
(kafka.log.LogSegment)
\ No newline at end of file
diff --git a/pkg/pattern/drain/testdata/kubernetes.txt b/pkg/pattern/drain/testdata/kubernetes.txt
new file mode 100644
index 0000000000000..e4a872143dc21
--- /dev/null
+++ b/pkg/pattern/drain/testdata/kubernetes.txt
@@ -0,0 +1,1000 @@
+I0507 12:04:17.597069 1 descheduler.go:169] "Number of evicted pods" totalEvicted=0
+I0507 12:04:17.597049 1 profile.go:356] "Total number of pods evicted" extension point="Balance" evictedPods=0
+I0507 12:04:17.597015 1 nodeutilization.go:269] "No removable pods on node, try next node" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-4z2l"
+I0507 12:04:17.596999 1 nodeutilization.go:266] "Pods on node" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-4z2l" allPods=16 nonRemovablePods=16 removablePods=0
+I0507 12:04:17.596973 1 defaultevictor.go:202] "Pod fails the following checks" pod="node-exporter/node-exporter-h82wd" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"
+I0507 12:04:17.596950 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/loki-canary-n5p56" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]"
+I0507 12:04:17.596932 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/calico-node-cnc6m" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"
+I0507 12:04:17.596875 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/konnectivity-agent-6f8f85c4fb-7bhvf" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold]"
+I0507 12:04:17.596860 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/calico-typha-7cc4789bc8-qhw5r" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold]"
+I0507 12:04:17.596847 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/pdcsi-node-7khn6" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"
+I0507 12:04:17.596827 1 defaultevictor.go:202] "Pod fails the following checks" pod="netfilter-exporter/netfilter-exporter-jkrhn" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]"
+I0507 12:04:17.596803 1 defaultevictor.go:202] "Pod fails the following checks" pod="gadget/gadget-zjjts" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]"
+I0507 12:04:17.596779 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/ip-masq-agent-7pgw2" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]"
+I0507 12:04:17.596753 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/kube-proxy-gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-4z2l" checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified
priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:04:17.596722 1 defaultevictor.go:202] "Pod fails the following checks" pod="startup/startup-sjjws" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:04:17.596704 1 defaultevictor.go:202] "Pod fails the following checks" pod="goldpinger/goldpinger-rc2pp" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:04:17.596685 1 defaultevictor.go:202] "Pod fails the following checks" pod="agent-logs/agent-lmlhl" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:04:17.596651 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/metrics-server-v0.6.3-68f5b7c4d5-t5mz8" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:04:17.596620 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/node-local-dns-w6xfd" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:04:17.596581 1 defaultevictor.go:202] "Pod fails the following checks" pod="conntrack-exporter/conntrack-exporter-rhm24" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:04:17.596547 1 nodeutilization.go:263] "Evicting pods from node" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-4z2l" usage={"cpu":"984m","memory":"611Mi","pods":"16"} +I0507 12:04:17.596528 1 nodeutilization.go:260] "Total capacity to be moved" CPU=5060 Mem=112216292800 Pods=163 +I0507 12:04:17.596504 1 highnodeutilization.go:108] "Number of underutilized nodes" totalNumber=1 +I0507 12:04:17.596484 1 highnodeutilization.go:107] "Criteria for a node below target utilization" CPU=50 Mem=50 Pods=100 +I0507 12:04:17.596431 1 nodeutilization.go:204] "Node is underutilized" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-4z2l" usage={"cpu":"984m","memory":"611Mi","pods":"16"} usagePercentage={"cpu":12.44,"memory":2.15,"pods":25} +I0507 12:04:17.596411 1 nodeutilization.go:207] "Node is overutilized" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-nxkm" usage={"cpu":"7813m","memory":"23232Mi","pods":"64"} usagePercentage={"cpu":98.77,"memory":81.8,"pods":100} +I0507 12:04:17.596391 1 nodeutilization.go:207] "Node is overutilized" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-jwtb" usage={"cpu":"4681m","memory":"5668116096","pods":"32"} usagePercentage={"cpu":59.18,"memory":19.03,"pods":50} +I0507 12:04:17.596369 1 nodeutilization.go:207] "Node is overutilized" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-h5xd" usage={"cpu":"7452m","memory":"16073Mi","pods":"37"} usagePercentage={"cpu":94.21,"memory":56.6,"pods":57.81} +I0507 12:04:17.596354 1 nodeutilization.go:207] "Node is overutilized" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-mzxx" usage={"cpu":"7586m","memory":"10644Mi","pods":"54"} usagePercentage={"cpu":95.9,"memory":37.48,"pods":84.38} +I0507 12:04:17.596339 1 nodeutilization.go:207] "Node is overutilized" 
node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-kpwx" usage={"cpu":"7851m","memory":"14566Mi","pods":"38"} usagePercentage={"cpu":99.25,"memory":51.29,"pods":59.38} +I0507 12:04:17.596321 1 nodeutilization.go:207] "Node is overutilized" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-kvt4" usage={"cpu":"7146m","memory":"13107674048","pods":"43"} usagePercentage={"cpu":90.34,"memory":44.02,"pods":67.19} +I0507 12:04:17.596282 1 nodeutilization.go:207] "Node is overutilized" node="gke-dev-eu-west-3-main-n2s8-1-1dd39c-d1c92061-95l9" usage={"cpu":"7781m","memory":"9358Mi","pods":"17"} usagePercentage={"cpu":98.37,"memory":32.95,"pods":26.56} +I0507 12:04:17.595169 1 descheduler.go:155] Building a pod evictor +I0507 12:03:30.627632 1 reflector.go:790] k8s.io/client-go/informers/factory.go:150: Watch close - *v1.Node total 71 items received +I0507 12:02:50.027655 1 reflector.go:800] k8s.io/client-go/informers/factory.go:159: Watch close - *v1.PriorityClass total 7 items received +I0507 12:02:49.544175 1 reflector.go:790] k8s.io/client-go/informers/factory.go:150: Watch close - *v1.PriorityClass total 7 items received +I0507 12:02:27.989125 1 descheduler.go:170] "Number of evicted pods" totalEvicted=7 +I0507 12:02:27.989104 1 profile.go:349] "Total number of pods evicted" extension point="Balance" evictedPods=7 +I0507 12:02:27.989035 1 nodeutilization.go:270] "No removable pods on node, try next node" node="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kvxj" +I0507 12:02:27.989023 1 nodeutilization.go:267] "Pods on node" node="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kvxj" allPods=64 nonRemovablePods=64 removablePods=0 +I0507 12:02:27.989011 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-wzsl5" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988996 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kvxj" checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.988976 1 defaultevictor.go:202] "Pod fails the following checks" pod="agent-logs/agent-x46tw" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.988959 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-btb5z" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988945 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-g5jbk" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988923 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-nzxht" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988887 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-9sxjh" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988870 1 defaultevictor.go:202] "Pod fails the following checks" 
pod="test-agents-01/test-agent-64975684fc-lhznv" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988856 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-4v5zd" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988834 1 defaultevictor.go:202] "Pod fails the following checks" pod="netfilter-exporter/netfilter-exporter-vsqft" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.988797 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-d4v58" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988783 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-29rrx" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988774 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-kh7j2" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988762 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-lds25" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988753 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/loki-canary-452mv" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.988741 1 defaultevictor.go:202] "Pod fails the following checks" pod="ge-logs/promtail-42tsn" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.988732 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-t6jhd" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988721 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-8rvkp" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988711 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-zzhkf" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988699 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-vmsn7" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988688 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-clkw9" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988674 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/calico-node-dckq8" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.988658 1 defaultevictor.go:202] "Pod fails the following checks" 
pod="kube-system/node-local-dns-8l5g8" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.988645 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-w8x2s" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988637 1 defaultevictor.go:202] "Pod fails the following checks" pod="pyroscope-ebpf/profiler-bgr54" checks="pod is a DaemonSet pod" +I0507 12:02:27.988625 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-kshxn" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988614 1 defaultevictor.go:202] "Pod fails the following checks" pod="node-exporter/node-exporter-8wcvq" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.988603 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-n7zqn" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988591 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-z2lsr" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988582 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-f8wqb" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988571 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-cpsm2" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988559 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-gfblm" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988550 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-frww2" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988539 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-bkrll" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988528 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-2xxtn" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988517 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-tmh6q" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988508 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-wqrfc" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988495 1 defaultevictor.go:202] "Pod fails the following checks" pod="startup/startup-9kxhq" checks="[pod is a DaemonSet pod, pod has higher priority than specified 
priority class threshold]" +I0507 12:02:27.988483 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-srn7j" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988470 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-j5rfq" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988461 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/kube-dns-67d96b65b4-8rnn8" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.988450 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-d4jth" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988437 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/agent-7j4xl" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.988425 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-xgx6z" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988413 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-8sxrr" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988395 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-zdjqg" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988380 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-wgvtz" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988364 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-hfh47" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988353 1 defaultevictor.go:202] "Pod fails the following checks" pod="conntrack-exporter/conntrack-exporter-sz7vn" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.988343 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-2llkt" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988331 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-dvsmf" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988318 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/ip-masq-agent-s9wbt" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.988305 1 defaultevictor.go:202] "Pod fails the following checks" pod="loki-dev-ssd/promtail-loki-dev-ssd-nb7hr" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.988292 1 defaultevictor.go:202] 
"Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-qjdpq" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988281 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-98gpk" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988268 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/pdcsi-node-zgl66" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.988254 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-8jrxv" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988242 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-6ksff" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988228 1 defaultevictor.go:202] "Pod fails the following checks" pod="insight-logs/promtail-insight-logs-2zxd5" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.988215 1 defaultevictor.go:202] "Pod fails the following checks" pod="goldpinger/goldpinger-dhx94" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.988200 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-crfdm" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988190 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-7jb6p" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988176 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-6nmnh" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988167 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-87kvn" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988152 1 nodeutilization.go:264] "Evicting pods from node" node="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kvxj" usage={"cpu":"13086m","memory":"24314Mi","pods":"64"} +I0507 12:02:27.988131 1 nodeutilization.go:270] "No removable pods on node, try next node" node="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-2dqk" +I0507 12:02:27.988120 1 nodeutilization.go:267] "Pods on node" node="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-2dqk" allPods=64 nonRemovablePods=64 removablePods=0 +I0507 12:02:27.988101 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-q5vxs" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988086 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-swkjg" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 
12:02:27.988074 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-52t6h" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988065 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-xq4dj" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988057 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/loki-canary-4r5hm" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.988047 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-xr75f" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988038 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-47lr4" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988027 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-rpll7" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988017 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-gtvn4" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.988006 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-lkkbt" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987995 1 defaultevictor.go:202] "Pod fails the following checks" pod="conntrack-exporter/conntrack-exporter-z6jpx" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.987983 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-ln6rm" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987974 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-hhnmg" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987962 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-r57h9" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987953 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-vvb5c" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987941 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-nhzzj" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987930 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-czjpc" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987918 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-vrdr8" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 
12:02:27.987906 1 defaultevictor.go:202] "Pod fails the following checks" pod="netfilter-exporter/netfilter-exporter-8xfwd" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.987892 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-w4262" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987881 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-tjgjt" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987868 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-rhbm8" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987858 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/ip-masq-agent-g4tkt" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.987843 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-lqc8k" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987825 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-qv94s" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987797 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-l47lf" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987786 1 defaultevictor.go:202] "Pod fails the following checks" pod="machine-learning/modelapi-764cffb67d-l9dcn" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987775 1 defaultevictor.go:202] "Pod fails the following checks" pod="ge-logs/promtail-f7652" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.987760 1 defaultevictor.go:202] "Pod fails the following checks" pod="loki-dev-ssd/promtail-loki-dev-ssd-ss852" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.987748 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-spvj4" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987735 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-fpx5r" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987725 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-khmgd" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987712 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/calico-node-hqcjm" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority 
class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.987695 1 defaultevictor.go:202] "Pod fails the following checks" pod="startup/startup-7wzwp" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.987682 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/calico-typha-558bd55895-gnzv8" checks="[pod has system critical priority, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.987667 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/kube-proxy-gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-2dqk" checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.987654 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-zq957" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987642 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-gpftr" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987629 1 defaultevictor.go:202] "Pod fails the following checks" pod="goldpinger/goldpinger-h59vt" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.987616 1 defaultevictor.go:202] "Pod fails the following checks" pod="node-exporter/node-exporter-95qf6" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.987602 1 defaultevictor.go:202] "Pod fails the following checks" pod="agent-logs/agent-bcjbd" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.987587 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-bt6g2" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987576 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-llpjz" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987567 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-2rcmz" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987555 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-s9wjc" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987546 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-fsfgs" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987532 1 defaultevictor.go:202] "Pod fails the following checks" pod="insight-logs/promtail-insight-logs-66v7r" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local 
storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.987518 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-skqxf" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987502 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/node-local-dns-fkf54" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.987486 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-v4m6q" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987477 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-fkzj2" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987464 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-nqkp4" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987456 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-nsgjr" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987443 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-6bb7s" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987434 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-vjltk" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987421 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-vzf9l" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987412 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-wgwkk" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987398 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/agent-g8hs2" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.987385 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-nhr9q" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987375 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-4f6cz" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987360 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-d2dvr" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987344 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/pdcsi-node-8gclq" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod 
has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.987307 1 defaultevictor.go:202] "Pod fails the following checks" pod="pyroscope-ebpf/profiler-kl2gs" checks="pod is a DaemonSet pod" +I0507 12:02:27.987291 1 defaultevictor.go:202] "Pod fails the following checks" pod="test-agents-01/test-agent-64975684fc-t6nrs" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.987263 1 nodeutilization.go:264] "Evicting pods from node" node="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-2dqk" usage={"cpu":"13776m","memory":"23960Mi","pods":"64"} +I0507 12:02:27.987229 1 defaultevictor.go:163] "pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.987222 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-q8mb" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.987212 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.987123 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-fg8w" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.987108 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.986970 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-t8zj" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory, insufficient pods]" +I0507 12:02:27.986953 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.986746 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-z2rp" error:="insufficient cpu" +I0507 12:02:27.986731 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.986658 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lhp2" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.986646 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.986520 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vf2b" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.986510 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.986423 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-g8vs" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.986409 1 node.go:339] "no Pod antiaffinity rule found" 
pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.986222 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-otel-alt-n2s4-0--3cf760c5-s8l4" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.986212 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.986109 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-vmrk" error:="insufficient cpu" +I0507 12:02:27.986101 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.986030 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-cd97" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.986017 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.985902 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-9lbc" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.985884 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.985765 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-mdnz" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.985744 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.985592 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-t2kf" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.985582 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.985484 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-c4qb" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.985473 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.985326 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-mfbb" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.985308 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.985136 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8z5k" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.985116 1 node.go:339] "no Pod antiaffinity rule found" 
pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.984968 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-q7hc" error:="insufficient cpu" +I0507 12:02:27.984941 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.984869 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hmlg" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.984856 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.984736 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-4x8m" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.984722 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.984594 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-r8v7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.984576 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.984505 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8n9g" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.984488 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.984372 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-k2r2" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.984349 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.984182 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7jmw" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.984157 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.984017 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6qq6" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.983998 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.983898 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-tjd7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.983857 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 
12:02:27.983625 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xv96" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.983611 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.983494 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-97ds" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.983481 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.983395 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-qmgr" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.983370 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.983153 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-n56x" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.983128 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.982977 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-m294" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.982957 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.982779 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dd5b" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory, insufficient pods]" +I0507 12:02:27.982756 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.982476 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-5rdf" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.982456 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.982240 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-4kpb" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.982218 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.982082 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-h9bx" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.982056 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.981917 
1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-q6f2" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.981888 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.981639 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-4hrn" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.981618 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.981517 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-cvf5" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.981489 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.981269 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-r94w" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.981238 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.980922 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lw2b" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.980893 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.980636 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-995l" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.980611 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.980489 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lv5d" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.980467 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.980082 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-psgn" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.980064 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.979950 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-hww7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.979933 1 node.go:339] "no Pod antiaffinity rule found" 
pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.979864 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-g9gd" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.979836 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.979580 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-sskn" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.979556 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.979425 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dhmp" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.979400 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.979237 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-ml9j" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.979212 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.979085 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7zvh" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu, insufficient pods]" +I0507 12:02:27.979062 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.978799 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-rn42" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.978781 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.978632 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8nfg" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.978620 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.978507 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x9df" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.978487 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.978322 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-h9sj" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.978299 1 node.go:339] "no Pod antiaffinity rule 
found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.978067 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-h7jr" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.978047 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.977940 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-n6g2" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.977911 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.977687 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vggg" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.977669 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.977559 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-484z" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.977539 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.977411 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7nvj" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.977387 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.977057 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6lcp" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.977039 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.976879 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6vzp" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.976857 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.976722 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-8dwk" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.976701 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.976610 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xzrv" error:="[pod node selector does not match the node label, 
insufficient cpu]" +I0507 12:02:27.976588 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.976466 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-j94k" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.976441 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.976303 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-c9m7" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.976280 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.976133 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hcwk" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.976121 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.976032 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-mstl" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.976015 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.975890 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-lnkb" error:="insufficient cpu" +I0507 12:02:27.975875 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.975773 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-h8bz" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.975755 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.975647 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8tx9" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.975627 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.975505 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hr4c" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.975487 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.975382 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-ndsg" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.975364 1 node.go:339] "no Pod antiaffinity rule found" 
pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.975254 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vdj8" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.975234 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.975062 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8f8j" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.975047 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.974927 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-lqg5" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.974906 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.974823 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-2mdh" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.974793 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.974678 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-bkt9" error:="[insufficient cpu, insufficient memory]" +I0507 12:02:27.974662 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.974560 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-j2vt" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.974543 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.974396 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-5snc" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory]" +I0507 12:02:27.974373 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.974292 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-databenchloki-n2-8c6b6266-2tvt" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.974271 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.974154 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-7pn8" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, 
insufficient memory]" +I0507 12:02:27.974131 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.973939 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-perf-n2s8-0-1dd3-91689928-6p26" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.973920 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.973840 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-z9jp" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.973821 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.973710 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-v84d" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.973697 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.973581 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-npcc" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.973565 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.973467 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-phd7" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.973445 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.973319 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-5lww" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.973303 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.972989 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-7chh" error:="insufficient cpu" +I0507 12:02:27.972977 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.972907 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-2dqk" error:="[pod node selector does not match the node label, insufficient pods]" +I0507 12:02:27.972894 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.972723 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dkbf" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.972712 1 node.go:339] "no Pod antiaffinity 
rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.972606 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-n6p2" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.972595 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.972493 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nf55" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.972479 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.972360 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-98xh" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.972348 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.972246 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xnql" error:="pod node selector does not match the node label" +I0507 12:02:27.972226 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.972006 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-rr2n" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.971993 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.971885 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-kdh7" error:="insufficient cpu" +I0507 12:02:27.971873 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.971778 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-jfjs" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.971766 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.971694 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7h6b" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.971684 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.971601 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-5vvr" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.971588 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.971426 
1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-96t4" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.971414 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.971312 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-9zxx" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.971301 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.971214 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-5k6z" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.971204 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.971138 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lq5m" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.971122 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.970921 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dhdn" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.970908 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.970783 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-x4p4" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.970760 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.970615 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-perf-n2s8-0-1dd3-91689928-9f87" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.970605 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.970551 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-mbt8" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory]" +I0507 12:02:27.970540 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.970480 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-twvx" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.970469 1 node.go:339] "no 
Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.970365 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-g6ld" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.970350 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.970200 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-cndv" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.970181 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.970095 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-p4tv" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.970084 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.969971 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-4flx" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.969953 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.969795 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-2pc2" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.969784 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.969676 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-t86w" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.969664 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.969584 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-q9ck" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.969570 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.969510 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-zp8f" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.969499 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.969408 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-zbcz" error:="[pod node selector does not match the 
node label, pod does not tolerate taints on the node]" +I0507 12:02:27.969397 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.969336 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-g7ks" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.969322 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.969093 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-bkfl" error:="insufficient cpu" +I0507 12:02:27.969081 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.969010 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-s8bh" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.968996 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.968822 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-cvzg" error:="insufficient cpu" +I0507 12:02:27.968797 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.968730 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-m75z" error:="[insufficient cpu, insufficient memory]" +I0507 12:02:27.968720 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.968651 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xstn" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.968636 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.968516 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lngb" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.968505 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.968402 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-9nlx" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.968392 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.968318 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-8jjt" error:="insufficient cpu" +I0507 12:02:27.968309 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.968230 1 node.go:157] "Pod does not fit on any other node" 
pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-462z" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.968218 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.968105 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-cc5j" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.968091 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.967958 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-databenchloki-n2-62e9c9a0-gn4f" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.967937 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.967868 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-gmgr" error:="insufficient cpu" +I0507 12:02:27.967856 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.967768 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-78dr" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.967757 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.967637 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-2nls" error:="[insufficient cpu, insufficient memory]" +I0507 12:02:27.967624 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.967537 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-frj7" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.967517 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.967342 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7kpc" error:="pod node selector does not match the node label" +I0507 12:02:27.967323 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.967224 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-8kcm" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.967203 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.967108 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" 
node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-f96h" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.967079 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.966913 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nvpf" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.966893 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.966739 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dnbd" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.966715 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.966460 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-d88q" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.966438 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.966225 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nx8q" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.966179 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.965869 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-s8nm" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.965848 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.965669 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s4-7-1dd39c-6f2ad845-dzq7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.965650 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.965554 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6sbt" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.965478 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.965265 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-gwtz" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.965239 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.964834 1 node.go:157] "Pod does not fit on any other node" 
pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x82q" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.964798 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.964642 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-qsmq" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.964547 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.964368 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dhh8" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.964342 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.964158 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xcm6" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.964123 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.964003 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-lrkw" error:="[insufficient memory, insufficient cpu]" +I0507 12:02:27.963986 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.963876 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-5jwm" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.963850 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.963647 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-2z44" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.963629 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.963469 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-md4q" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.963451 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.963332 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-wtb8" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.963302 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.963053 1 node.go:157] "Pod does not fit on any other node" 
pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-jdt8" error:="[insufficient memory, insufficient cpu]" +I0507 12:02:27.963032 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.962923 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-b2ds" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.962902 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.962766 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kvxj" error:="[pod node selector does not match the node label, insufficient pods]" +I0507 12:02:27.962749 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.962539 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-v7l7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.962530 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.962468 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kkh5" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.962455 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.962336 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dqf8" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.962316 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.962164 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-bz7s" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.962139 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.961976 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-perf-n2s8-0-1dd3-91689928-mm2d" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.961963 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.961886 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-78d9" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.961867 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.961726 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" 
node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nbp9" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.961699 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.961490 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-g75l" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.961478 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.961399 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-m7wp" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.961386 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.961287 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-nrwm" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.961277 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.961216 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7lvz" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.961205 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.961117 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-69n7" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.961107 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.961004 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-rvzj" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.960993 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.960917 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vlg7" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.960896 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.960788 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-databenchloki-n2-8c6b6266-sspk" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.960776 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.960670 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" 
node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-rv2l" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.960645 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.960467 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-r95m" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.960454 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.960323 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-254k" error:="pod node selector does not match the node label" +I0507 12:02:27.960310 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.960221 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x28r" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu, insufficient pods]" +I0507 12:02:27.960201 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.959977 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vvgr" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.959951 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.959827 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-mqr6" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.959795 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.959735 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vb4s" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.959718 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.959617 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-perf-n2s8-0-1dd3-91689928-hl8m" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.959605 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.959538 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-f2n6" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.959525 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.959431 1 node.go:157] "Pod does not fit on any other node" 
pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-mjps" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.959419 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.959359 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-mwjl" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.959343 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.959204 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-jl7q" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.959185 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.959080 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-gpu-llm-g2s8-0-1-db560ef7-hkjw" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory]" +I0507 12:02:27.959061 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.958991 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-28sf" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.958977 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.958835 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-perf-n2s8-0-1dd3-91689928-q9n6" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.958815 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.958737 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vznd" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.958719 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.958586 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-74lw" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.958570 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.958450 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-jccs" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory]" +I0507 12:02:27.958432 1 node.go:339] "no Pod antiaffinity rule found" 
pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.958359 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-c687" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.958345 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.958228 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-89hx" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.958204 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.958101 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-t4fv" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.958078 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.957967 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vpgr" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.957946 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.957824 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-databenchloki-n2-8c6b6266-bz78" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.957790 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.957680 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-h8cp" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.957661 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.957566 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-4qc8" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory]" +I0507 12:02:27.957543 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.957462 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lgmg" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.957446 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.957318 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-h8wf" error:="[pod node selector 
does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.957301 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.957127 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-9fp2" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.957110 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.957033 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-n9zk" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.957016 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.956956 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-mpm6" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.956945 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.956838 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-hzp5" error:="insufficient cpu" +I0507 12:02:27.956826 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.956721 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-kqpq" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory]" +I0507 12:02:27.956708 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.956643 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nz7w" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.956626 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.956522 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xwpk" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.956502 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.956401 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hgdb" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.956385 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.956214 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-s6kw" error:="[pod node selector does not match 
the node label, insufficient cpu]" +I0507 12:02:27.956189 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.955722 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-rpmg" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.955708 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.955581 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-lnlr" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.955565 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.955501 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-9ghc" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.955484 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.955369 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hsq7" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.955347 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.955205 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nrth" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.955192 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.955053 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-86xp" error:="insufficient cpu" +I0507 12:02:27.955039 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.954944 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-4np5" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.954924 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.954781 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-7vfx" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory, insufficient cpu]" +I0507 12:02:27.954762 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.954656 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-c9px" error:="[pod node selector does not match the node label, pod does not tolerate 
taints on the node]" +I0507 12:02:27.954640 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.954568 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xm9z" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.954555 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.954473 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-thpn" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.954460 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.954366 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-g4ml" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory, insufficient cpu]" +I0507 12:02:27.954347 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.954223 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-c7rv" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.954210 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.954066 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-otel-n2s4-0-1dd3-b196a3e4-gvt7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory]" +I0507 12:02:27.954051 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.953882 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-j5wp" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.953832 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.953690 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7llm" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.953673 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.953501 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7c5w" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory, insufficient pods]" +I0507 12:02:27.953482 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.953255 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" 
node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-tqwd" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory, insufficient cpu]" +I0507 12:02:27.953233 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.953141 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-z6f6" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.953125 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.952955 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-885v" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.952933 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.952721 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-4jtn" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.952693 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.952500 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6bbf" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.952474 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.952343 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dc4k" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.952324 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.952096 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-5nj8" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory]" +I0507 12:02:27.952073 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.951920 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-5kz8" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.951905 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.951719 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-cz4q" error:="insufficient cpu" +I0507 12:02:27.951706 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.951613 1 node.go:157] "Pod does not fit on any 
other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-9jbm" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.951595 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.951400 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x94l" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.951380 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.951160 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-d8sv" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.951132 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.950992 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-databenchloki-n2-8c6b6266-k4v7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.950975 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.950868 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vpmb" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.950848 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.950675 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-qqlx" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.950653 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.950509 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dtw8" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.950491 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.950368 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-t94m" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.950357 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.950221 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kv65" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.950197 1 node.go:339] "no Pod antiaffinity rule found" 
pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.950038 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x24s" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.950018 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.949880 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-hg-n2s4-7-1dd39c-6f2ad845-7lsj" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.949860 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.949749 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-trgv" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.949722 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.949485 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-d6b6" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.949467 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.949345 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-qg78" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.949323 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.949136 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8wbq" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.949117 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.948997 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-v5z4" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.948971 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.948795 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nx76" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.948778 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.948619 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kbpj" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.948600 1 node.go:339] "no Pod antiaffinity 
rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.948372 1 node.go:157] "Pod does not fit on any other node" pod:="loki-dev-005/querier-burst-6b5f6db455-5zvkm" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-jjb7" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.948350 1 node.go:339] "no Pod antiaffinity rule found" pod="loki-dev-005/querier-burst-6b5f6db455-5zvkm" +I0507 12:02:27.947830 1 nodeutilization.go:274] "Evicting pods based on priority, if they have same priority, they'll be evicted based on QoS tiers" +I0507 12:02:27.947815 1 nodeutilization.go:267] "Pods on node" node="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-g7pt" allPods=20 nonRemovablePods=19 removablePods=1 +I0507 12:02:27.947778 1 defaultevictor.go:202] "Pod fails the following checks" pod="mimir-dev-14/store-gateway-zone-c-1" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.947758 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/node-local-dns-sqdgz" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.947727 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/ip-masq-agent-4gfch" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.947646 1 defaultevictor.go:202] "Pod fails the following checks" pod="loki-dev-ssd/promtail-loki-dev-ssd-4vnl7" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.947622 1 defaultevictor.go:202] "Pod fails the following checks" pod="insight-logs/promtail-insight-logs-9v8cj" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.947595 1 defaultevictor.go:202] "Pod fails the following checks" pod="netfilter-exporter/netfilter-exporter-zg8mq" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.947572 1 defaultevictor.go:202] "Pod fails the following checks" pod="mimir-dev-11/store-gateway-zone-c-2" checks="pod has local storage and descheduler is not configured with evictLocalStoragePods" +I0507 12:02:27.947555 1 defaultevictor.go:202] "Pod fails the following checks" pod="conntrack-exporter/conntrack-exporter-lv6d2" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.947534 1 defaultevictor.go:202] "Pod fails the following checks" pod="ge-logs/promtail-5g7v6" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.947415 1 defaultevictor.go:202] "Pod fails the following checks" pod="pyroscope-ebpf/profiler-jb6bw" checks="pod is a DaemonSet pod" +I0507 12:02:27.947397 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/agent-6v7lp" checks="[pod is a DaemonSet pod, pod has local storage and descheduler is not configured with 
evictLocalStoragePods]" +I0507 12:02:27.947372 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/pdcsi-node-9dvkv" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.947344 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/kube-proxy-gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-g7pt" checks="[pod is a mirror pod, pod is a static pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.947315 1 defaultevictor.go:202] "Pod fails the following checks" pod="kube-system/calico-node-j55qj" checks="[pod is a DaemonSet pod, pod has system critical priority, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.947283 1 defaultevictor.go:202] "Pod fails the following checks" pod="promtail-ops/loki-canary-pfmwd" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.947262 1 defaultevictor.go:202] "Pod fails the following checks" pod="agent-logs/agent-xw47w" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.947196 1 defaultevictor.go:202] "Pod fails the following checks" pod="startup/startup-x2pq9" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.947182 1 defaultevictor.go:202] "Pod fails the following checks" pod="node-exporter/node-exporter-8wxwx" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold, pod has local storage and descheduler is not configured with evictLocalStoragePods]" +I0507 12:02:27.947167 1 defaultevictor.go:202] "Pod fails the following checks" pod="goldpinger/goldpinger-bdrkn" checks="[pod is a DaemonSet pod, pod has higher priority than specified priority class threshold]" +I0507 12:02:27.947137 1 nodeutilization.go:264] "Evicting pods from node" node="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-g7pt" usage={"cpu":"6826m","memory":"16564Mi","pods":"20"} +I0507 12:02:27.946987 1 defaultevictor.go:163] "pod does not fit on any other node because of nodeSelector(s), Taint(s), or nodes marked as unschedulable" pod="gel-sbdev/gel-4" +I0507 12:02:27.946977 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vpgr" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.946956 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.946850 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lgmg" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.946825 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.946637 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-h8wf" 
error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.946604 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.946458 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-9fp2" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.946448 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.946387 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-databenchloki-n2-8c6b6266-bz78" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.946373 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.946238 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-h8cp" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.946223 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.946141 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-4qc8" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.946117 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.946008 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-mpm6" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.945983 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.945840 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-hzp5" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.945794 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.945699 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-kqpq" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.945682 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.945619 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nz7w" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.945607 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.945529 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-n9zk" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.945516 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.945461 1 node.go:157] "Pod does not fit on any other node" 
pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xwpk" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.945451 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.945377 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-s6kw" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.945358 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.944933 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-rpmg" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.944917 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.944782 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-lnlr" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.944772 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.944718 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-9ghc" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.944707 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.944613 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hgdb" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.944600 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.944448 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hsq7" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.944434 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.944309 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nrth" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.944297 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.944187 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-86xp" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.944175 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.944096 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-4np5" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.944084 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.943991 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-7vfx" 
error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.943978 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.943921 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-c9px" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.943908 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.943820 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xm9z" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.943789 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.943673 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-c7rv" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.943650 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.943534 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-otel-n2s4-0-1dd3-b196a3e4-gvt7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.943518 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.943376 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-thpn" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.943363 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.943274 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-g4ml" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.943261 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.943146 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7llm" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.943124 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.942945 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7c5w" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory, insufficient pods]" +I0507 12:02:27.942921 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.942663 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-tqwd" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory, insufficient cpu]" +I0507 12:02:27.942642 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.942579 1 
node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-j5wp" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.942569 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.942485 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-z6f6" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.942476 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.942367 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-885v" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.942357 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.942267 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-4jtn" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.942254 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.942120 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6bbf" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.942109 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.942016 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dc4k" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.942001 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.941784 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-5nj8" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.941763 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.941615 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-5kz8" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.941596 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.941416 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-cz4q" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.941392 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.941302 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x94l" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.941280 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.941066 1 
node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-d8sv" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.941047 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.940854 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-databenchloki-n2-8c6b6266-k4v7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node]" +I0507 12:02:27.940831 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.940701 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-9jbm" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.940684 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.940483 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-qqlx" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.940471 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.940330 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dtw8" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.940317 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.940191 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vpmb" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.940173 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.940058 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-t94m" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.940044 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.939866 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kv65" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.939838 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.939669 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x24s" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.939648 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.939519 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-trgv" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.939506 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.939326 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" 
node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-d6b6" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.939305 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.939185 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-qg78" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.939169 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.939025 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s4-7-1dd39c-6f2ad845-7lsj" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.939006 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.938934 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-v5z4" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.938918 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.938745 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-nx76" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.938730 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.938576 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-kbpj" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.938560 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.938424 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-jjb7" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.938413 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.938263 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8wbq" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.938244 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.938073 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-t8zj" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory, insufficient pods]" +I0507 12:02:27.938056 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.937819 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-z2rp" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.937781 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.937680 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" 
node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lhp2" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.937667 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.937535 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-q8mb" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.937521 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.937401 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-fg8w" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.937356 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.937123 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-otel-alt-n2s4-0--3cf760c5-s8l4" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.937099 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.936967 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-vmrk" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.936946 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.936821 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-cd97" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.936790 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.936711 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-9lbc" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.936692 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.936556 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vf2b" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.936511 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.936363 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-g8vs" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.936344 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.936187 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-t2kf" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.936169 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.936054 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" 
node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-c4qb" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.936037 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.935861 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-mfbb" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.935838 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.935650 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8z5k" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.935632 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.935461 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-mdnz" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.935444 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.935279 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-r8v7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.935261 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.935180 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8n9g" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.935159 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.935052 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-k2r2" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.935035 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.934875 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7jmw" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.934856 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.934704 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6qq6" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.934684 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.934591 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-q7hc" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient memory, insufficient cpu]" +I0507 12:02:27.934572 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.934484 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" 
node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hmlg" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.934464 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.934334 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-4x8m" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.934317 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.934171 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-tjd7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.934148 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.933962 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xv96" error:="[pod node selector does not match the node label, insufficient memory, insufficient cpu]" +I0507 12:02:27.933943 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.933763 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-n56x" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.933741 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.933595 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-m294" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.933580 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.933422 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dd5b" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory, insufficient pods]" +I0507 12:02:27.933391 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.933134 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-5rdf" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.933119 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.932921 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-97ds" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.932903 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.932782 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-qmgr" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.932761 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.932560 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-q6f2" error:="[pod node selector does not match the node label, 
insufficient cpu, insufficient memory]" +I0507 12:02:27.932538 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.932308 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-4hrn" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.932287 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.932203 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-cvf5" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.932182 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.931987 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-4kpb" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.931970 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.931845 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-h9bx" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.931825 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.931704 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lw2b" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.931692 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.931523 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-r94w" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.931512 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.931361 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-995l" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.931350 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.931261 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-hww7" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.931250 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.931193 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-g9gd" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.931180 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.931024 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-sskn" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.931015 1 node.go:339] "no Pod 
antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.930927 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-lv5d" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.930909 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.930566 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-psgn" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.930556 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.930487 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-ml9j" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.930475 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.930389 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7zvh" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory, insufficient pods]" +I0507 12:02:27.930372 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.930181 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-rn42" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.930170 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.930060 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8nfg" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.930051 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.929968 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dhmp" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.929957 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.929829 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-x9df" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.929799 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.929669 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-h9sj" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory, insufficient pods]" +I0507 12:02:27.929655 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.929482 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-h7jr" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.929469 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.929387 1 node.go:157] "Pod 
does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-484z" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.929368 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.929271 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-7nvj" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.929250 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.928985 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6lcp" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.928974 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.928859 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-n6g2" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.928844 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.928695 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vggg" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.928685 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.928615 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-8dwk" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.928605 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.928548 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xzrv" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.928538 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.928456 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-j94k" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.928443 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.928344 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-c9m7" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.928332 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.928224 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-6vzp" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.928213 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.928114 1 node.go:157] "Pod 
does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hcwk" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.928105 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.928021 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-mstl" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.928010 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.927915 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-lnkb" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.927902 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.927821 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-h8bz" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.927792 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.927703 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-ndsg" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.927691 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.927605 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-vdj8" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.927594 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.927470 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8f8j" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.927458 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.927363 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-8tx9" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.927344 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.927236 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-hr4c" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.927226 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.927146 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-bkt9" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.927135 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.927048 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" 
node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-j2vt" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.927037 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.926931 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-cache-n2hc8-1-1d-61155fd9-5snc" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.926912 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.926826 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-lqg5" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.926795 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.926733 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-2mdh" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.926719 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.926602 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-hg-n2s8-6-1dd39c-3bfd06e9-7pn8" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu, insufficient memory]" +I0507 12:02:27.926575 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.926358 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-perf-n2s8-0-1dd3-91689928-6p26" error:="insufficient cpu" +I0507 12:02:27.926345 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.926273 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-z9jp" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.926255 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.926156 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-v84d" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.926139 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.926026 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-databenchloki-n2-8c6b6266-2tvt" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.926008 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.925905 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-npcc" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.925881 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.925762 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-phd7" error:="[pod node selector does 
not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.925747 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.925585 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-5lww" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.925559 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.925091 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-spot-n2s8-0-1dd3-f81338c4-7chh" error:="[pod node selector does not match the node label, pod does not tolerate taints on the node, insufficient cpu]" +I0507 12:02:27.925072 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.924975 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-2dqk" error:="[pod node selector does not match the node label, insufficient cpu, insufficient pods]" +I0507 12:02:27.924946 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.924689 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-dkbf" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.924667 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.924508 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-98xh" error:="[pod node selector does not match the node label, insufficient cpu, insufficient memory]" +I0507 12:02:27.924481 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.924273 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-xnql" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.924259 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.924073 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-rr2n" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.924007 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4" +I0507 12:02:27.923883 1 node.go:157] "Pod does not fit on any other node" pod:="gel-sbdev/gel-4" node:="gke-dev-us-central-0-main-n2s16-3-1dd-9b502d96-n6p2" error:="[pod node selector does not match the node label, insufficient cpu]" +I0507 12:02:27.923860 1 node.go:339] "no Pod antiaffinity rule found" pod="gel-sbdev/gel-4"
\ No newline at end of file
diff --git a/pkg/pattern/drain/testdata/vault.txt b/pkg/pattern/drain/testdata/vault.txt
new file mode 100644
index 0000000000000..498f3e43cc5f7
--- /dev/null
+++ b/pkg/pattern/drain/testdata/vault.txt
@@ -0,0 +1,1000 @@
+2024-05-07T10:58:13.877Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8fc47b8d69b6067dfef3bfc7ce32d9b79f04c7658699678d333a6fe1ec1f94d6 +2024-05-07T10:58:13.790Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hcae776c5ea5b05ed12d123dc2562db1e7a0457b8acfb9bde5ee43c8a3fa61ebd +2024-05-07T10:58:13.444Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/h64ea582437fc72c1f5bb0ad92d990cf0224505a029c7473ed4af89aeaee340da +2024-05-07T10:58:13.354Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/heb36625ca83a5ec2835139b141b9c127de955e4752acdc212cd433249cfe76de +2024-05-07T10:58:13.041Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h127c7b053cd988e815c721b8d9ae7d462cc82dc8bd7b8796d5fd6a1897ddb449 +2024-05-07T10:58:12.892Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h3e511801fde00772f7135f0d2a204c9118a0fcf5aba3f2bf35aaefb0385961eb +2024-05-07T10:58:12.742Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/haf7504c0e9c156f2a275d03f83a003e85368f1c0607c47b445bf119f4368d7c9 +2024-05-07T10:58:12.376Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hecbb8e21d7714302d761bb441787989c3db5523f9cbd6dee24a73c2abe9d660e +2024-05-07T10:58:12.200Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hb3bd608d943755438718e0c34a518ce1c5eae4e67494946e2f915e459f1dd896 +2024-05-07T10:58:12.155Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h3a8e9db9cac94fcc521ac325e945f70593cb0b1e7dc0a900549deabfee88d360 +2024-05-07T10:58:12.044Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h7ffe2bb8a21670b4474c8a7e29a6c0514266122b6d29753fcbd7179142634a7a +2024-05-07T10:58:11.666Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h6faa2455eaad0db7853bcb86bc3f9c39fa54ec9028e26fde10328a7e33974618 +2024-05-07T10:58:11.533Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h8f24e52ace31a64056a9ef772d327b49a7aa01a77a9ca9ef67f31b028a6e2f0c +2024-05-07T10:58:10.791Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3bbef5dc1e9498ea432f3e7669d8afb5c3b19a7ff81e6d5726bb0e8ffdfcfcfd +2024-05-07T10:58:10.641Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h49ff65d9a54554bd3c310fafbd00111172b6cd06eac79e620c1517df66071b98 +2024-05-07T10:58:10.603Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h33025e568500d5ff6caca1e18ae8874b422f36e5929a279587aac340e7c6053d +2024-05-07T10:58:10.238Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h422773384d99d5cc0a524467bb8a81c58f72b005c31202f25a47a42f1d0183ab +2024-05-07T10:58:09.757Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/ha13f8124868add1318cbb847441934bfe6f8f6a196aaecf0231b5149e43e1557 +2024-05-07T10:58:09.475Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h6df7014dc3497386c386b90c5493c2639af1c341697a4cadc9513e3b15c1cd6b +2024-05-07T10:58:08.952Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb3969fe694698b9f55f28bcf1c38483d7f4880523e7c61b364212bd4667a1cee +2024-05-07T10:58:08.630Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h610fb3959b1c780fcb42e62d9950fd0112ac6f2c1266a2b36df1239862c7ceb8 +2024-05-07T10:58:08.449Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hb11efa7e961a97c885d64dd58ef6a997b2cdad10dbf68fa586480963dc5a81d2 +2024-05-07T10:58:08.137Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-east-0/login/he2dc7880454b2f4998307b5a2e89a1934499c23fd753ea247fa20e393607d567 +2024-05-07T10:58:07.935Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h277f65be7734384672a94e43e87a932a87c4b33a26625c4a666ff165b1c791f7 +2024-05-07T10:58:07.669Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he0c84a7e402f2b7e15dbbfb0a5420a406937785701168952adc4886c600dec4b +2024-05-07T10:58:07.557Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h21bab40300ab5bf7d7216a068a57dbb486609bf0c86894d55336f193f0373ae7 +2024-05-07T10:58:07.435Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h62ef8fa328bdd53425e4a0ce26beed753a814200d5e1b73399a660c8dd6e3721 +2024-05-07T10:58:07.414Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/h6003048eb2f09a89027685ee1e506212688dbc101ebd533692bc9494d8d019da +2024-05-07T10:58:07.397Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hdea9032b7806150b16ea028d44925976cac71225869baee7dbbee6fb775502d9 +2024-05-07T10:58:06.539Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h0ac8fd86a3af52901229676138c66170e6ea225baf9098a8688673f4eae8a9bf +2024-05-07T10:58:06.491Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/h17c9df8bac4fb461ff6977b258bba1ecb1345c177cf55c01773c397b1b3f1303 +2024-05-07T10:58:06.382Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8d3b4023159c69199b0548eb38a1bf746c7a8e7c5d86785740d50208359b04e4 +2024-05-07T10:58:06.360Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h2e8df9fa1730d8ec6aeef1db775cc5f08dbc8f79ce56e0fea8bbd62064c362c3 +2024-05-07T10:58:06.324Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h9557e6168d3f9e57e2d2720329f095fc06057b2ede1b608b59e7fa2a35690da3 +2024-05-07T10:58:06.024Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h4b719010e884019b2014320db8aae3eeae839e5c95bea9fb66fcd82f251b66aa +2024-05-07T10:58:05.839Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8160b8a393e9e561023522f6e551e4e5d1c9247de4e7b46176a11434ee9aa8e7 +2024-05-07T10:58:05.708Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h43490a52ff0ff58827009dde665a5856744d44c6582b6b59feff862d9a4b7c90 +2024-05-07T10:58:05.336Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h0919d6075633ac3dc7884a1b965f07194b6284c2268ab598649472cf4a61ce79 +2024-05-07T10:58:05.227Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3f0418844cc060382b256aa11ad562145f27f34c20ac7905c041a1213c7d05bf +2024-05-07T10:58:04.827Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h6b2ea4980225e3a7c182e7055e776426f4370f44cacfa919e5c39b459f6f8986 +2024-05-07T10:58:04.315Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hd43f647d69c2214dcff6f020027bb3b1d7b60cd0e2af1ce23707f1c28d5a40f5 +2024-05-07T10:58:04.190Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5843c3d7dc5bff8b42fd045bb1e1c2294537a0910f74f1438ac89ba038d6f5de +2024-05-07T10:58:04.007Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-east-0/login/hf0e12c2e4aca2cbf4e4411e7a7e8ce53ec0efad2aee813d9acae295b418773c0 +2024-05-07T10:58:03.916Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hfbe844265790fcc65f3ccc40cc299ed3a78671b5517498fa8a0dc2904017486b +2024-05-07T10:58:03.749Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h5adf56b07093186ab0bae642aa906bd6534a0b9111f96c3ab241f590e77036da +2024-05-07T10:58:03.437Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h13d1a5fb038dd83733769fdc50390478c319d49e43c1e5c189d2a84a44cb3e90 +2024-05-07T10:58:03.245Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5cc3d68c93b9b84df58dee8df1b9edd0332cba41a7a8a6a5d2b206cac587868d +2024-05-07T10:58:03.161Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h45f7d62f33ab25854fe004e85c71ac0ebd1665b0247dc0395b93f7ef40f2909b +2024-05-07T10:58:03.006Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/ha869d0c8b2d2b4d46faa227aa0f23bc7762d2b89c6945d76c974645e003cbbcc +2024-05-07T10:58:02.828Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h7cb9e24d53b4d9c9c71c9cc60dcf08fc2e74b87cbe069b7bff017b46fcdf7147 +2024-05-07T10:58:02.608Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hea6a06e26e7a6e42e2589817bf066e98812c6134d2190e148edb913ceb62363e +2024-05-07T10:58:02.482Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h342d43c47bc77d240708be8e8177009a65a6fff1339917d0fbbe3c445f13b6dd +2024-05-07T10:58:02.324Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hc0e5aec45187960f6cc5cfc37ce1596ec631187d6ef0d7cb7f1b11ba19a52553 +2024-05-07T10:58:02.204Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hf8a9a5617284bba896aef52d136f4d93bfbd2e9e8768bb2b0fd5d865bbb3bfde +2024-05-07T10:58:02.001Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h0d09bc1641809b47cc6e112dcaf8c25f5ee0b1668765e8e1b7033fc4de779793 +2024-05-07T10:58:01.989Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/hf55b420bd570419911bdf9d71b620f46623b6f70d5e49ea4ea25a70db4d53313 +2024-05-07T10:58:01.530Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hc763fafe67c0d332ec3c84ba10679c40dbc7106ea115e14e5106d79c27e0263c +2024-05-07T10:58:01.440Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hb6950875a1e913ab3458be3dfece7469735a8ef3513c77ca52faa0e915b493fd +2024-05-07T10:58:01.376Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h47be0dbf3047949762a0b2b694bf16bd6535cf0aa431804d24585c9f810e899b +2024-05-07T10:58:01.042Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h387c4e92b09c52a0cdf545b0bbcd14186f748a55a56d8ccfc6e9404d2b95e61e +2024-05-07T10:58:00.850Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hddab873685c76eec31e6501a72ba85d8e78c26d79b63af6b519e31356f8a9cb0 +2024-05-07T10:58:00.513Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h4c033188f654dc87827c859c7bbdef72ebf2460fb1c40aa000cf56ab76a94eef +2024-05-07T10:58:00.512Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-eu-west-2/login/hdd6b47e1f58a9370be0382ae162a2ccda6561c1a76ddc6b7392c627af5c9dbfa +2024-05-07T10:58:00.463Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/h1e0f3e7d39ba2e01ff0e61ae97688ef3a17215d8aacbb34751de8aa5295a4004 +2024-05-07T10:57:59.843Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hba66895f510704aaf6a43ed740cd2915d96e0365c04d3f64a137f829a99cbbd8 +2024-05-07T10:57:59.809Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h938f712ce928eccb6a086c56ac3bd95c1c1a6a71a7e79458820a5eebd3439b6d +2024-05-07T10:57:59.635Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h01add1c237551f52b6c261d7a4da08031deed10095c050f6f831ccc67e063d00 +2024-05-07T10:57:59.561Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he5a2ca90a8b4c81c73095405ec11455fd061482e4a51307ed56464aba5b7301c +2024-05-07T10:57:59.291Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3b9cc8b734753e19a49f2d13fdc7880cdc459d3c8ab21d60e6484a1488362fac +2024-05-07T10:57:58.974Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hb97816e80712e9bba252612464f657f9027bccaa7795c210d7eb5c019ef545cc +2024-05-07T10:57:58.900Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h5a650cdc0ae87bd5c0b255196d44b66156aadd406dc3a75c6d5638c2c2124df3 +2024-05-07T10:57:58.895Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8eea138ec711749f6d157ba14dc7edcd34d2085d506cc392068291ac52158073 +2024-05-07T10:57:58.766Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf3bef9ab02728a80e088b65777203069f37fde4dbb58bae27bb8d81fdfd27885 +2024-05-07T10:57:58.466Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/h9d8e995030043364a8144b9adb013105ad6945057b0b3c06ad4bca63126327d8 +2024-05-07T10:57:57.912Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hde19ae02f7fa113bc27a95485b51f71f928f5c7b98392f72ee73a27a75f70da1 +2024-05-07T10:57:57.807Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hd73da5b4248c5b5dd552b0d6870cc1c64bc06130d6175b774c484d4e4c349a5b +2024-05-07T10:57:57.510Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8c0916dd3db1c243e7c3379b067f51471b327eda445284c533a0738c12855dae +2024-05-07T10:57:57.345Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h30b77f1a5d685b02785887ba055e66c007dfceec3812101f80e9d58aed1ccac1 +2024-05-07T10:57:57.084Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/haefc41d80ddd67d1cc4acb78100e81cd17adae308f4e201b2716ff74f2f61f38 +2024-05-07T10:57:57.028Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h626e4d519424ab8dc0cc0b0e85073c18b519ac5df644ee239a5ed7566d5a23f8 +2024-05-07T10:57:56.935Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha0bde2a06c1eeb95fcf0ee5436fad8e9912b14d204bb53fea7d24696ca649a7a +2024-05-07T10:57:56.526Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/h397e00faaff9ba2aba17916c8221b960edc9cc39957509ed3da92d546f19b0c9 +2024-05-07T10:57:56.316Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/h77697840f65c1c9b2c1312911dc598ff429e493e7b132e5bd1b0b9c6f68e9328 +2024-05-07T10:57:56.272Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h55568d903125bbb654cc043af658930462b9fb1d2c521c317ea56ed32d143191 +2024-05-07T10:57:55.716Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h421bb32322e5c805edcf5bdc9e4345de648e8282da09622a84f30d5e6c1f495c +2024-05-07T10:57:55.384Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h9569e221882bf043dda232b9e9b90192a371bb7a916b72b0eb47fa577ae51166 +2024-05-07T10:57:55.239Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h52581986e2359ce7f30780dcc8a7b7fd9e3dc59963e656564c76767a5a12852a +2024-05-07T10:57:55.229Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h4934f5750201ae68a390c56781672ac4af86b37dffe89d58603b9bcc21f7c7ec +2024-05-07T10:57:54.993Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h34aceaf3dbc9bec9821b9926083caa2606908fd3053602693ea5780fb43d7a43 +2024-05-07T10:57:54.452Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h1d740727f4c297fa11ec970d9eeb128967c88e8181fe6c22f327add4657ee0ad +2024-05-07T10:57:53.866Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hf052b8a3a849e95ce2a11b49054ff9fe0697cf1734bda1bfe7df6c9d4a372515 +2024-05-07T10:57:53.663Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h09cff1364d2df542105a565e66cdbd5e873876b4aced829428600286a188e632 +2024-05-07T10:57:53.623Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hb94398dd59f6eabf8dd7e3d2a23c0c8d5bf69ca3ea326d343ea7dd78e24fb046 +2024-05-07T10:57:53.491Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h637ecdf20cc28060c0efcbcba937f5ab18dd295fe69e24eaba984284e997b293 +2024-05-07T10:57:52.563Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hb8382bc9ed9b93555effba1fe739f2295cda4626465f2b6f0d080613f3166349 +2024-05-07T10:57:52.221Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h9cf9786fbba2f04bd6e529e04716c9dbf103ba68d7c14aa0adc3b7e9cdfb9e75 +2024-05-07T10:57:52.093Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h85e1b4996e795c1352a3ba3946158cbfc4da14c823c2f889dd717997eaa4e670 +2024-05-07T10:57:51.837Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hc7c3e527a13aa08a213fd10c59431a95c8c94f0fecd1d581ace0767bc3fd711b +2024-05-07T10:57:51.754Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h918302f464e8d8c21b2d27be1b6b1c0218a5238b67a9eaa09cc413a2284cdcd5 +2024-05-07T10:57:51.443Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hbe3f6df4001bdbe9b8076e48ea47ceb8a8bcdd351e525e616aaa21f0bfb88788 +2024-05-07T10:57:51.245Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h7fe50828aebff0cd1fb060019b13698ed94c614b25993450b2f3c9503ebb45dc +2024-05-07T10:57:50.929Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/h8e523c07d4bad8bdde5f76f97223ecc3af5fdeced9ad4df54096da12b4fe3a6f +2024-05-07T10:57:50.580Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/h15a6e8bf067fbdb29a88ae9e7e69fc1c2b5ce37e62b6c3129beeaa4ebe203abe +2024-05-07T10:57:50.535Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hffa20b65510c165a992e734e3e1b9c46f7c604b7b5358685d77a35bd8629d8f5 +2024-05-07T10:57:50.447Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/haa8805f36c50f41d8ca89a48a497b73447088950594d4993753148580ee80b46 +2024-05-07T10:57:49.445Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8db51052aff860709078baf3eb596970b5f2fc7a2da3a0250256621c20b65dd4 +2024-05-07T10:57:49.201Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h229a75965ff90463709433a17094b7247c7dc5c9fc1e79d712070d253d88cdd2 +2024-05-07T10:57:48.636Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h6954963ccbfb25f7d55efe1735a5a7328c89c5142c2ccd2fa562ccd4912bb091 +2024-05-07T10:57:48.588Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h4e3e90440cb9ac36b8883a59e4e9d90f2d87ea37ba23fbb51399be773b249132 +2024-05-07T10:57:48.258Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h67933a19c2d4722dd06fd687cb5af42440403a58993de5644184292e19230079 +2024-05-07T10:57:48.103Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he3997523edc091cf692431bc88cab212a18837a723bf56968f3dc37d16b52f72 +2024-05-07T10:57:47.509Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h852a896c3331e9b3ea2a866f86fae68cac13b37aa909b76da770e5ed915d8da7 +2024-05-07T10:57:46.829Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hc8473888eae263d0431b723b082b867345c0bea73e90a38645363977e6156c75 +2024-05-07T10:57:46.647Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hdcd706504d35d3be7b96c28786b8ba0ed4520ee7f51e795dea38ebac30966770 +2024-05-07T10:57:46.197Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5b28ce03c20f7acc336a31000a989a413e782a176bbfe1de9d6da84f588b3824 +2024-05-07T10:57:45.704Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he1a98ef3a41e14f7e7025805b911b7fdecc01c2845991710a54222e4ede635b4 +2024-05-07T10:57:45.419Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h6f1e3ba00058fa8c1355b7ca062bc1bafc0c003d8d0f0b767e275ecce47b490a +2024-05-07T10:57:45.234Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha954c4174370681a2d2f6b6941cc05d6ebf50c53483f66031bc2986a85a70dee +2024-05-07T10:57:45.144Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h92d49d585142ee8066b9e4cb82742f3762eba8dc9a74efa0e57fb58f90313be0 +2024-05-07T10:57:45.055Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h61198fdeb64590c58e19c3b1e1cc3a07e54cea77e734045f0dda28baf6da76c6 +2024-05-07T10:57:43.844Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha7054b6531409d689ea23f7bc6372e4210efbcc69c4ace5b6fa354dbb40f4897 +2024-05-07T10:57:43.199Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hb88c9f41937c7ab1a9fa4984eef081aa7eae632352595c02b52d748177d4db16 +2024-05-07T10:57:43.009Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/hf110386c992e570209b942cc1497da8c81f230e78fa154e7e94c94b8540ca9b6 +2024-05-07T10:57:42.033Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h027386f645d92f0210d0b35b4926f30974ee2ae5d0f0602592c217fe969a6b99 +2024-05-07T10:57:41.951Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h1df3262823b6a8dc81d36ae99c78d85b52e9bfeb2755c9f50892fd5e3e5120f2 +2024-05-07T10:57:41.843Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hc928d24fe32f85a1e7ab97a7459ae97aa00f9e86528e9380aaaa20ed89da7543 +2024-05-07T10:57:41.391Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf4c3bd434cd3424f69330c31b3053c93e0029e30030e8421ec1fd723d1446b04 +2024-05-07T10:57:41.095Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h8486ad2ce0414c89b93a63ea5f2fe295ccceb74ac45e642193f754215b8be711 +2024-05-07T10:57:41.003Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hcf9b45206058f75a536af39d7847b094b1fbcac271e671f54d222b912b9aa6d4 +2024-05-07T10:57:40.893Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h128360f86ba44e07b9c0695ce4511e7ad24180bb4b2c2c5e3301b3e758cbec03 +2024-05-07T10:57:40.442Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h319822f12118579099598e1bfb0c16eea31d4e7e6be46e8b8c2e4f2d749c2e59 +2024-05-07T10:57:40.298Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h80a0b5e888b78d7945eb7685b9433143b686230cdef6e7c00e376ac5dd84bec7 +2024-05-07T10:57:39.922Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h55c5a71b0e7d603b0bd5371773a5208f385eb60ddc8c715fd3c7238369b281aa +2024-05-07T10:57:39.621Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he0cc8082fbbb26e65196fdf83bd4e0eb4f2982a1fb330f3be1f813cef8f3de8d +2024-05-07T10:57:39.446Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5a8498da499728d42e8b4517d8b81f45b902865a93aaba24698b843d8a6bea23 +2024-05-07T10:57:39.215Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h288074938a6803bad38d925340a7e0b8055f8cf04ae25395ede4f72b76d4e273 +2024-05-07T10:57:38.590Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h71e6c7c34ba4647ad1e45a9a7d9cd8fa7353a4f109c61e96456d769d0b345783 +2024-05-07T10:57:38.316Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h521ab2b8226ff754d4cdc796974b5a41fa254b0095e0d6bdf710c0ed4a66d1b8 +2024-05-07T10:57:38.256Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hc0aefc7178b1b0fb103213ffd4d8600ebac0b779a5be2c02723f6bfe0e507552 +2024-05-07T10:57:38.244Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h3e9981208267cf45f45ec640e1a6944ada887fac8015f9ad0258c0a8cbb744b0 +2024-05-07T10:57:37.953Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3b49e46202a088f79d4b134fe2edf54b40dd96e5d0163583bb9790b5e64e7e5a +2024-05-07T10:57:37.770Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hcfffa9435478aca4602c16d1182e4e3c878320402d91d372c01b6b29e91e0436 +2024-05-07T10:57:37.713Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/hd77f1a7e62428008b03f685e145cccb061f3f6a16f3905f9d681c57b78e1f7a2 +2024-05-07T10:57:37.436Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h35940df93cff65806b095dbb055d952c20243aa3dfd79aa2de02666cf5b97465 +2024-05-07T10:57:37.212Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h6753e0644bb6a114900238e3ee96bc6df19f90656dca9eb24cd4a2b868416e12 +2024-05-07T10:57:37.185Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h1aae1127f9a37bc4ca0004f076d368ecd83ddc1888d3b20ef23dac0631224ad0 +2024-05-07T10:57:37.006Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hd4fa00dac1a4bb6632dfaef21e8209a69af2e64f6edcf13c1b1d569224b7f71f +2024-05-07T10:57:36.925Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h53ee210a0d635e1f3d0919803b93346f4f610114bb7242ab3100e90ec9cad4b2 +2024-05-07T10:57:36.774Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h0c135c91ed5b01143c28ee3ef1254c7d135d252219387d7645e88c4f5c60635a +2024-05-07T10:57:36.505Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h4f7fc380b5ab2f79b5fdc149507f61270c92dceb87a7f0bd81e644a3ed6e9ee3 +2024-05-07T10:57:35.856Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf83c559b27992f1d3d6c0efaf52c4ca559edc2dbf406a829513cf91615b63cca +2024-05-07T10:57:35.606Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h2228cf4b22dfc9e902ad6b3abee7c90981a1ac413e607dd8fab240e788281421 +2024-05-07T10:57:35.354Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/had4f387bfc5630aba23564e39728d263913219e792c7c817540e8fd1585e9d41 +2024-05-07T10:57:35.260Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h842b3f2caed13fc333bdf921be617a651faa8ebc9e9e3b73881531ca337249bc +2024-05-07T10:57:34.967Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hb9a2f3d8f657cdb53b6ecd7d4b6e0716cd61850f635501d5675c985544137473 +2024-05-07T10:57:34.880Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h02ab5c5c34b338052527f7e66b7c2f9c6366888981a83fe65c53bfb2c4c2ad0e +2024-05-07T10:57:34.868Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hfe5ff371a4020a67e9407967da98d2e7b58f9a029b55091428edd7e1b550fc7e +2024-05-07T10:57:34.811Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h0cf70455a24a87375f3e7b4155d3486e4feec500ce3348522392509a80e5c00f +2024-05-07T10:57:34.728Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hc7e25d20d2cf857fe65954c711e346c8abe18789784433a2868313aaccfce925 +2024-05-07T10:57:34.306Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h900a8d218ab8f0fd9b9b1c44e9dc2c7010b643d74222465e27524378d1b6718c +2024-05-07T10:57:34.302Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hefacc525bcafa415a3ea6eae581ac45cb0f5bb1a0a8194723daf70641c07be03 +2024-05-07T10:57:34.100Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5d65c98e5958943f9dfd7842aa328229e2a5a19a2c6b380412bc5c2002db30f1 +2024-05-07T10:57:33.910Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/ha84ab28f2162ddd4934cbf068449502ccae476531b054f46c5a4460d8bbfc140 +2024-05-07T10:57:33.784Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/he507c066e01c3ce2e1f45bef6b27bb4fd123acd58ca7850f3d6ac9b81f55e498 +2024-05-07T10:57:33.010Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h578d607a878e5f8598b0b61fb4b11346e0f4fb54cf0969d6ae4e35f8c1b2665d +2024-05-07T10:57:32.632Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h998767ad1f7b1b8e8765adab985cab537dbd11d70251626454ab55d7361d8acf +2024-05-07T10:57:32.477Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h42808f2f6da856fc4bcf596c85af438145cc6a49798df3bd843144cd5d0f1ded +2024-05-07T10:57:32.056Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hdc9a9ddbe564d74e7a6e2feb96ef7a93267fc48b0d465c57518aebec2402d344 +2024-05-07T10:57:31.721Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he9136ff93bb3ed5ec635707b50dcbe270b6eb10fdf8cc71db70e4c743c4fd690 +2024-05-07T10:57:31.686Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hca350a758d7f926dae42d85c2302db0ff4a8b65f58e35c7c01fdac3b8e2bb6b7 +2024-05-07T10:57:31.641Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h7a3c57f55e183a60fdf66cf98812e2da8cc319f1d43b0774c8047800c8266a81 +2024-05-07T10:57:31.196Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hd408cfd8e114f9af9847ffc1c55ceb9315a4864a45869725454b15a509a2570b +2024-05-07T10:57:30.678Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h30ee196c6fb8dc00b24ead82b290be0906774853f292c628fde747c2228cdf15 +2024-05-07T10:57:30.586Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/h985c61cc9ca7b020160b7c03ef31a45d57aac96513af9bb36f3fc5f54c873338 +2024-05-07T10:57:30.510Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf7d0213f53bb530f5721f24877d2d013af1a9a58bebadd2f5659bbcbb11bc05d +2024-05-07T10:57:30.249Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha3917b4cdf4bac7b83d210a3fb22ea8dbb13b790b30f84aa1d634a066e85e9cd +2024-05-07T10:57:29.624Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h2e8665a0042558b77c2bb5eb494bc45b4c293530f05ef41fe3e000d4f5e48b1c +2024-05-07T10:57:29.468Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h15b37ebcffa622298fe524983da160b2a094558bd78921b5c16b12745f2f2d1f +2024-05-07T10:57:28.850Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h809052f539bfe4ea21f13aed145771e2ac93f60395b43bbca0558a257a1ec582 +2024-05-07T10:57:28.677Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h9d26f6396ee2da420e2f6e7ef2fe985c0d2c95819bef35ea3a19181fa3c05c62 +2024-05-07T10:57:28.053Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hfacb838d825f77e2b6298304cd25a7c3adaf3edabcefb7b2e1aaef6449c7bffe +2024-05-07T10:57:28.013Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf5a023aef011f05923683cd3274c8267526845ff847ce017e53dd0daadb21268 +2024-05-07T10:57:27.913Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-eu-west-3/login/hac04d03318abc75cbcd7ee663905f9660d65519c1a01cb1cbbdf5911dc7d4bc8 +2024-05-07T10:57:27.791Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h9ec80c0c1563ac26d0110b36a3a64a96ba605f8af0b372ed66964e8abf9b33a4 +2024-05-07T10:57:27.633Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h27a02440c89ceb185969a40d72c28d2d6ceee94b8ee0300d122350ffedc11514 +2024-05-07T10:57:27.478Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h741118908a5a2b1b62df7e2b433f76ab52a0c1d19630fb471c6b7ddc3ed7aefa +2024-05-07T10:57:27.465Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hea27dc6397ab3b4dadfcf8ac54bff85cdbe67a7ada45abcccf56adadf52f5bfb +2024-05-07T10:57:27.030Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hf0a298c05caf3df8f5c936742858ffad67f70e6fb67ca7a09334c82cbf4d8eaf +2024-05-07T10:57:27.011Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h0dcd2ff5222bae1c0f5a928ac018c794ca47a9eb1921c30318427434f934bad0 +2024-05-07T10:57:26.839Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3ef9964c876f7ff1b8da44133f3c6c7da66fc63cf328af583c633ac29537637a +2024-05-07T10:57:26.684Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h04c402a5b16a85698ef56fcc816a353757ea999a26799ab7d7d3ee0bca60b545 +2024-05-07T10:57:26.567Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h24f4d3085819c3819f342441be07eb916c816ae85683b3071c02796a66320625 +2024-05-07T10:57:26.565Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/h4a40c0857e6fbf53260ef20e29b04cf8eabf0381d3963573daa88187b2038ba5 +2024-05-07T10:57:26.456Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/ha61a882536b93ff1e016499d601ddd7e12a1fdd81aa3affe819acbf6a60f6c5f +2024-05-07T10:57:26.336Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h0951e1bb2c82978e32140892794a771365fde885ca5a25cf7ca0d4af4eea5bdb +2024-05-07T10:57:26.215Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h77a5038bfc8a6b1f64d53d8a8352bd20f6dec94072627aa73b8fb1dcc68aff40 +2024-05-07T10:57:25.883Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h5805cf40cbd2ef176aa6d335bd7880913202f97fc7ab281931efb10df68a7983 +2024-05-07T10:57:25.724Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h0ec1efa77be71853ea0f53eda4255e77eeb9a7097249ffe45f297a6a7a28a3e8 +2024-05-07T10:57:25.671Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h95750f3469992a3189e5160fe0556a9d2c21ecc62f451d86cfb65ebc1086bf96 +2024-05-07T10:57:25.433Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h909e928db715b4685e5972917717a178e538d7d0f6a0c13a5b697deace5e9213 +2024-05-07T10:57:25.427Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hac0f59c07f266da807b68a659e1b220f49fb22eff07f56c771b7b4710410915f +2024-05-07T10:57:25.233Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h47731815f8fbb2ad17756783a2ebb029876144fc53dd2c8c422cf8267f22aa34 +2024-05-07T10:57:25.060Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/he1d96a53732beb98b167a42906a43a26e32f188d30da4fa6425ec1bdbeffab5f +2024-05-07T10:57:24.691Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h9fee0ca9c28ed1071a697fa751de8bf4abef2614eea5437bcf0cd32e5aa643f6 +2024-05-07T10:57:24.311Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h708c9ae9d4fa4ce840ae07fc8670523e7b6ec6735c336ee0917c5d183150021e +2024-05-07T10:57:24.225Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h0a8273c76767c9d9539e29ebc8b22ff9693b0ebb3c4f1ec98c7d3e17fd64df9c +2024-05-07T10:57:23.804Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h72b5a9b1cba3cf91e8488f1d75265a3c9186e64cfce8cb26294362c1d188a89a +2024-05-07T10:57:23.737Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h826212dce8673fa930e0ce18b85d52651dc3910f3a5c0e11285ee24d3361a697 +2024-05-07T10:57:23.430Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h93541017c7da856068f10449987707065a69456df300a2607f35dc0fb461d505 +2024-05-07T10:57:23.119Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8b8288b573043f0bd59a1c6a723490daf040cf564f85166865705c1e67e5c25d +2024-05-07T10:57:22.645Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/hbeff438d49f07901b8efe9056d21cffc9df896423d7455f061c2df75ed50e280 +2024-05-07T10:57:22.023Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha334ebc1138df734e5f3544811ab2abe0a771b41d21292c2a5c28708ebb887b7 +2024-05-07T10:57:22.004Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hcd23d9f63870ee35a942645460074361da3be951ebaf950711bdd8279670533f +2024-05-07T10:57:21.906Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h13065fafd5160a17a45d52cf8f01703fa7a72a1e0766fc952f199ed653b79fcf +2024-05-07T10:57:21.551Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/hbf2332104491efa0d1f3e53aee551dadb2f768463dcdeff40794c9d48fe56cf2 +2024-05-07T10:57:21.545Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf7e0889d633a8ebb5316ded638c8a7a319fb16b4cedeec0463af87fafaf810c5 +2024-05-07T10:57:21.403Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/he04c6c39761904c9a037958b4d927499e13a20f79249bdd492ebfd1e50f4adfe +2024-05-07T10:57:21.298Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h29105cc690d1f11818c3ff193e14d24a126acf151829b7c1274eac4a3108a309 +2024-05-07T10:57:21.024Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hbd93ab40f0a719c340fd3ccbe7c1b5a05988f6aa916a2259b586b304fc2c5495 +2024-05-07T10:57:20.655Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h5d370c430f78d2ebad866e731c663ce9a2fb7cfa52a3c7b93906b2ef4cee4bf3 +2024-05-07T10:57:20.213Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h2e4195cb8109b78cbb27cc3195c30f8ccdfb1e62db20356b1c9d1c75b39b4edc +2024-05-07T10:57:20.035Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h164bc765b69e4fdedf5c2572125be13d71eb185da668679a07a46d8de84c40cf +2024-05-07T10:57:19.820Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/hacfd55c7f9cd8511dce5cad7246817e2517d95e948994e2ed442f03ea3386085 +2024-05-07T10:57:19.743Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h8e2fb16334a3e251cc9e2019d0259e367a1be79d63618563497b4d08d4ae6f4c +2024-05-07T10:57:19.517Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h659cfe8ef7a4af4fbb21a055388349bf48319df277c1a39f2ce7eaaf0aaca1f0 +2024-05-07T10:57:19.455Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h44de4261c0eb2aa2f20eb3ed2bdef16c68cf18734cc91560823adb23350bd1ea +2024-05-07T10:57:18.973Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h5bbf8da255c16de881df0d44d496aa44507adfb032eb4fd3e5e59d689a26b0d9 +2024-05-07T10:57:18.968Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h76920bdf54f7848fff693bb6608170b0916d274ccb953e6742f45ded59c521e3 +2024-05-07T10:57:18.946Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h774d3bb7664a69dcc11ca47299d8fa11728cc76dd8214a9662a356e0a309f330 +2024-05-07T10:57:18.567Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/hd679624d9886db160c6aca8fb9980aeacdfcb06ea79684b7b20548c75efd1bfc +2024-05-07T10:57:17.974Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h5244f125c60dd74c310fee7a8439392118b25b0b6298dda18e85c04387f30f3b +2024-05-07T10:57:17.831Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h46bbc6b64ab28be03778a342d651df83233b556b8c5e5afd578d3212cc40fe4e +2024-05-07T10:57:17.778Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/he29005c044a6c016fc310803b3a437aee44d6ab1de83182dbeb3c00fbf445ed1 +2024-05-07T10:57:17.775Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hfe854f32815900bad380145a4995863ff94d224f25f7169571c850d8f7c496b6 +2024-05-07T10:57:17.519Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf5aced80e392f8931fb82c11c463036e7ad0fcb74f4b11a1b0e4bfdcc1da2e93 +2024-05-07T10:57:17.254Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h4546a84e0f923c996cd2573fbb5527579225070538938ef61e9bc7e0d76d6d41 +2024-05-07T10:57:17.074Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h425e1b3fd0940799f375b27d5b20b98c1dae39fb831bc2c92d2af367fbe6ca46 +2024-05-07T10:57:16.991Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf5485be58ad980b20405352a0f401df7918d1616613a7f9b14e3888df2447620 +2024-05-07T10:57:16.529Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/ha0d04254aa13f1f3148f91a3a81ca92e7b7af6859a5da30ac0dea8a25dee1744 +2024-05-07T10:57:16.278Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h553352db5538ae16f8c1c7ae69b482a83e886af4c51ac6b3d2e69ee51cb8d6cc +2024-05-07T10:57:16.156Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h53db9cbc35a323d5c32c7664098b15026cf36bc688624a1adabb6c97b80caeb1 +2024-05-07T10:57:15.995Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h2ef99ab1eb2a82ac80e7cab3b8d868ea763b5da4fe4275095569994fd4b2add7 +2024-05-07T10:57:15.869Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-eu-west-2/login/hf8e9247de0a6ddab1dda1fcc1a9b7b93abf79c3fd90a715e5af4d607ba9ee3d7 +2024-05-07T10:57:15.808Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he0ad9cd67e01d949bf7ac478bd07618d07fe34cd7418c561cc19c7096b59a58c +2024-05-07T10:57:15.482Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h4fa6721ceeb58fd0cf76b48f4f24bd2722e1414665db4f8a24b6ebd38ee00956 +2024-05-07T10:57:15.232Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/ha7fde22b31039edbc40352d3749fb2cacb4eacc1f771172c5d2aba99779af10a +2024-05-07T10:57:15.230Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/had0ff91905cc551f2b3f349472460eb0d2fb340fd2fbd6e1800110ec1be07cb2 +2024-05-07T10:57:15.067Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hbaedf22ba72571f67c906c0d401eea26658624d5e39720d6e79fb8bfad69e3b0 +2024-05-07T10:57:15.026Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h2e2717fd4c0458cd3ffd766438923b113d1ee1883e5b275a2d75bf2c2e87a30d +2024-05-07T10:57:14.840Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he8d93572c721f34db7d6f0a5a92c51837add5b116aceaf7f0a66821e09fa5bca +2024-05-07T10:57:14.810Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/he8fe7ea29487271ce57006678999defd1010a37ce5635d67a55d90c5ac951d65 +2024-05-07T10:57:14.313Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h07b93fa257e0a71424efba0b2d9d5d4bec41c6660d491faa876ea36b7cdc78ac +2024-05-07T10:57:14.286Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h768a52ee4f256bd3aebcdd69d3d0c51ebbcde5159e1295eaa5e72037e2ce8d71 +2024-05-07T10:57:14.160Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h84e946ff749c5d3a74268d5f7235129bdf69ac4a885b659979e285048d7581a8 +2024-05-07T10:57:13.950Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hfcb7f483483f2a4554f541884194f90f8f44dab648916263f11a62733fdeee5c +2024-05-07T10:57:13.822Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3106db0e3e6b8b87d3554f8b47c9eee8096706e870d3d21912bc9e75ba3454eb +2024-05-07T10:57:13.611Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hf67e434c05323253c54c7103c4301887b38cbc0a81834e7d701d56a54b3bc1ad +2024-05-07T10:57:13.547Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h05ecb40aaaf366682d1e2b62275f4de156c148c479062a86158839bc0cf7f78f +2024-05-07T10:57:13.508Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h7d1a944f4841375cbd51346bb5b7e98f2fc05e24335bea541416ee40e520b5ac +2024-05-07T10:57:13.463Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/h113709a272d81f6235c645783a80358701e14daaa8e1f0244b29d921ef8424c8 +2024-05-07T10:57:13.443Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h81eb06b5156c6a52ff6d0f4a7b896cea94665f210ba20eb9f8d18dd12675039f +2024-05-07T10:57:13.374Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h77b97a6e974276a23a58e986c351d2cf52d1d86eb380c3fcd9c4e27076d5fce2 +2024-05-07T10:57:13.047Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/he7164f2beb4a36e48457cc9be3054f7909c79af2a028ff7c3ee54dcaf5bd71cd +2024-05-07T10:57:12.957Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h2d28298b11928df3dd643578c79066f493e5912c54d5b9c4b357677fc6e923f1 +2024-05-07T10:57:12.938Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hac2a9fae7e0b59a6abe9dd0d02323b83fd943c1841eb7e3af3d93f909de45209 +2024-05-07T10:57:12.853Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h8d598a08161c538d20806f6bac00997fa8d59fa8d6c664dfb55a5c23767bdf37 +2024-05-07T10:57:12.743Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he42db5344169c878239024949ec904c762d5a97b6f33194220c3d90b72d25585 +2024-05-07T10:57:12.643Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hea85e80b56f7daa656eabeaec863364f9ba35b79113db51f7c8f90a2697918a1 +2024-05-07T10:57:12.400Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h5bef4b357802ebcf0cd04c21fb02d00274ddb91d4a79e2ec6ac7484a32a476e0 +2024-05-07T10:57:12.343Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h5b3fd4932007d329369bc79a736bc0f3db3d2644dcd434518cb4e7c45714a4cb +2024-05-07T10:57:12.262Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hc33210f6573efd96d213a16c62e0de0d39a7d2806d7e45740da83d83a2940fd5 +2024-05-07T10:57:12.214Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5bfce62cc1df8ec0f4ed0b0c74246992f6f1284de60908e337988d55f2257e4c +2024-05-07T10:57:12.069Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h91869713332957a20fd6ac27ee1e228c347605a918a81f6b9ed465075e0a1771 +2024-05-07T10:57:11.889Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h1ee8efdc3b685aaa80822aef757972619f99d953648bc83c06e3c5442b2cbe97 +2024-05-07T10:57:11.866Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/hcbe970209d10378e3937628ac78af5753721dd49d9a56cc892cf9c7b96e88085 +2024-05-07T10:57:11.274Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/hfcdf3b5cefb1408c3e34bb8c4708f1d026876dd31af2951d39e130fb2ddd8fc3 +2024-05-07T10:57:11.213Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3b46b3c02ef2b0a9b0dfe0e95d15363313677afca93a5105cbdcaa08b350939a +2024-05-07T10:57:11.091Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h90c33ae5998b2a2cdb9e7964eae819878fe41fd4316a6ddbd3f2c5a68c995d12 +2024-05-07T10:57:11.070Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hae39e153cea3985a9f854e37cd3a87b9a93063f488d3c8adbe8a73e9b242e8a1 +2024-05-07T10:57:10.982Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hbe93492e66819edaf48dc19f372db1019ce683ff5e5fd86485126eb905c96ef4 +2024-05-07T10:57:10.329Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h5c63baed3fc0c8f54ec6cdddb6b867e8c199f32cc136eedc95f6185bbdfa9dfa +2024-05-07T10:57:10.312Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/hb7441b4e1c0c076482de116a44aea185ce4ce779739f8d512455f908407af9c4 +2024-05-07T10:57:10.171Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/haac05791802fecf479b58b820ba7953a142246036086e7e180ae66ec0ddb3cb1 +2024-05-07T10:57:09.869Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h143c5dd58af64f8c17208a585ee06c32aece8aaf8ba1dd0bbaeb9babb5452865 +2024-05-07T10:57:09.862Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h9346c686d7b046cfc39ed40e5b700d9348c2c355283e0b006deb453dca6bdb32 +2024-05-07T10:57:09.506Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hadb24a2dbbef471804ef9f4d4a3588d678e42e695118f019be1a815a843d4c46 +2024-05-07T10:57:09.470Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5ee2be3f5cb6d36003ba71e653bb7aeb530051b256e295140eb7f1e179f4c4ea +2024-05-07T10:57:09.470Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h1a6e9c95b031d2b80fa38c20d4574846158dc55a94509d9b3b1140aef20f20c2 +2024-05-07T10:57:09.382Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h1bb545ba82b6127af52bb98a423b745a62df2c472f15b5ef1cc04ceb9a0f4b30 +2024-05-07T10:57:09.382Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hd457f19cadca2be7d51ea05ed0d5d015af47317133a6fd6244ea2c884fe007c8 +2024-05-07T10:57:09.379Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hdf13dfae91c55af8cf2bda6fd2a56480176320c8bcb521e23fe2908241e1ca74 +2024-05-07T10:57:08.806Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha57d69ba26f68738852b0522a68af3a57f43cdd802cb945a6e119175f2eb624d +2024-05-07T10:57:08.601Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h84e507d2ea3b2f63c73c3b6df06988d379ce168693bf0ef821bc1c82edd42d2e +2024-05-07T10:57:08.471Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8b3aeaed6666aef831a5569756b7b749b7f954fc699e06e5398872a1156f8465 +2024-05-07T10:57:08.451Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/hf6aae44ff3705ab197ccd4161fa970e670bd48980a1cd1b889ccd500f477a1e3 +2024-05-07T10:57:08.223Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h46581efb0f338caf175b7030286b02ac2ae21ceb3db1ea8f66ffaa6217292722 +2024-05-07T10:57:07.984Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hd80bc8157a8ede525080489dd8c9172e866f6968e9fefc007212fe23bf99135c +2024-05-07T10:57:07.877Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/he9f2ac4e83355776fd1900d413989dbcf65505fdf494aa5381a48afdae00ad52 +2024-05-07T10:57:07.824Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h71c3744f8559d9ba54306d233f25676042b7462770b821c4e87c05ec28316e68 +2024-05-07T10:57:07.524Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h8d4d1f51e99459fa5f1ee1cd522c6ae2584261c798bf38ab7e96fa6c3dec270c +2024-05-07T10:57:07.358Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h9b37356368a4007060a04e510a0b91d547f54ac254882d080a05b65d24a5e118 +2024-05-07T10:57:07.203Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h806dd9e8ae7e56d54a25c19df1c29da6365b9a9779eeb2b5ad0e6cf4c8aa1d06 +2024-05-07T10:57:07.108Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-eu-west-2/login/h8a466d58c3c8ec211a083e1dba576b88b4678793ec803dc40512d53cc23ac46c +2024-05-07T10:57:07.069Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h46caff0ca380180841a262b3c6f391cf477f3b97530b17ee9961e45d89adc0ed +2024-05-07T10:57:06.844Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hab1b58c8f6fbb065b35a8d0b43cf2e3678b7f14faa6924d60fcc4920a2a0e72e +2024-05-07T10:57:06.715Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hea1207ec4e10aa36d088038bf9b4f45e613414e4516bc97ee05805b32b9a73c1 +2024-05-07T10:57:06.668Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/hc6392f2b252fdbf6d04dbc8c8a1cc028d1cbcdf00356450ae53b80a35b4dccda +2024-05-07T10:57:06.504Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h4b7254ce4d223d62b8278016db8f87497aad4a07fec8ac2aa6d320fc2d9e0ee9 +2024-05-07T10:57:06.466Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hbef407dbe527568442f09e4a509f64bf9e8b2cfd2d8f6f3d07d923d6e72a4d40 +2024-05-07T10:57:06.350Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h451ebb32aded17a156cdd1691f098519dbe959148d1f53dc9216ab790f4fb244 +2024-05-07T10:57:06.297Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h5fc2ededb3f6189d68672f99d6766d6f5902e3135d5febfd245d5e7789e6c052 +2024-05-07T10:57:06.013Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h2195ace6f057d6f40c930fe83233900cdbd74efbe3db3d381ba88c3fb45a52a7 +2024-05-07T10:57:05.944Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha42ff2c8458f936b1ac4063febb1af5bd98b030164f98737d3f0beeea7aa29a1 +2024-05-07T10:57:05.678Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h11adf5ae0899aa65f2b4d0a4ed5c882129d925934b46e1e2c8fde30cf7f31cbc +2024-05-07T10:57:05.439Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hcdb397499407977cbb6e85261e34260f162545f2f615e30c37382327e6857887 +2024-05-07T10:57:05.433Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h9019dc84d4d482f170fe277929a1facdd0e6dd49a9bf1b4730e1972487f1b7a1 +2024-05-07T10:57:05.398Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h41b4383a72a80047e45679a54273a8881661a2519e5201b752327dd6b5773d7f +2024-05-07T10:57:05.305Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3c132945d5f6a8184ede1dcc2ebaf59feb0eb9380c08f6e065ac89c42fe00a48 +2024-05-07T10:57:04.938Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hb4559c55d2f6cca12c8b176d2e73466d13be1d7137a737e70997625df6dc04a9 +2024-05-07T10:57:04.837Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h5c3acf376fbc92bbc701e26c56f2166d208eecbe6898de2822b43e72ad196afb +2024-05-07T10:57:04.724Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-oregon-0/login/h3a99d35fd5422ebdc0f33c37b8431cc80fe4417aa8ccb69be37c57b1a55f8076 +2024-05-07T10:57:04.542Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h947985bdc46cb034cfc4f1a99e87b4ef6fd59d449a3001e5e982441d392890da +2024-05-07T10:57:04.455Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/h512624c2303938edcb42adc23bca8cfdc4e625b4259f64749e96bc52c0bae8fe +2024-05-07T10:57:04.360Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h87f6fa74ed2ce9954aab5e01823022ee938da5fe24d34ff4ef91ce21daf997f5 +2024-05-07T10:57:03.853Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hbacb488c52243fa600d7c174291f3606821393b1620f794faa915e722e12a94e +2024-05-07T10:57:03.841Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hc729f1f42911bc170f0ba1e14aaeb67ed9a826fd93f1449b0ba540b17ed20810 +2024-05-07T10:57:03.675Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h62b460a4c5eea0d8e6881167eb8e87531b70edb259a8f889ba0a14cb00ca5592 +2024-05-07T10:57:03.543Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hc2052552d6c75dea8133a0fbb4267c406106d949bf5a0ca3ca9b525621583098 +2024-05-07T10:57:03.306Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h8fca26d482d523a47d9910a835a41284978e038c90f2120f643df0ea66a0f6e6 +2024-05-07T10:57:03.304Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h832539052dfb2365d3528838c0557dd01f70926c7e61ba80bff215e174a764d2 +2024-05-07T10:57:02.783Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/haaf0f45ef161568708f40a67637a936eaff3f71abcf8d68962bdcf17b59a459c +2024-05-07T10:57:02.017Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h2202aee0517931d81808f668e38cf93de67b335f65e0fc2935e0002e70d70be8 +2024-05-07T10:57:02.005Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h303d5de51148e37bb42f9531a8c1e906929eb4e985a63b05541153bc01fc0950 +2024-05-07T10:57:01.823Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hbc4f3269cca789fb4027d5e81aad0bbe4136eb439fff1520fdfa484c75436296 +2024-05-07T10:57:01.751Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h6a89c6a9bf61919f0584cdd79f02730ebcc52593df24be2271d9d6077b46e0ba +2024-05-07T10:57:01.555Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h121f7042ce28fa0e61693f3801df3e48edce083da402751ae224b2efaefb8922 +2024-05-07T10:57:01.330Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/h259cedde4d3a7f1c2a172f00b59042385dff9dfc1f2c52ec400e7cd8f06c5431 +2024-05-07T10:57:01.316Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h4158c69c50300479779306a9a08de846566a0fb8985ae6e51839e6b2d653a0b7 +2024-05-07T10:57:01.173Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h42448771f6af4f6f0c2989d6e4c142f3ecd44dbf97e79d3099669998d5cf298a +2024-05-07T10:57:01.068Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hd1c570fad8b2c79c8af4b4028969893b69a56d07e99b5f44febd2602f6b3cd5e +2024-05-07T10:57:00.773Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h4266870b84c122ab72d7a12b102b1e9cabb159bee1f9bf3610741fdb39f03817 +2024-05-07T10:57:00.688Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h26dba340d8f2fe599557ec9e43174f0defe940e7458a7c8964d0c7c51e99092b +2024-05-07T10:57:00.341Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-east-0/login/h472c9c8efa4928f964371aeb13337876082766c73be9a0027c9a37e814ba4c45 +2024-05-07T10:56:59.995Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h04e8ba3c341764073f6a74078fca39f6fbbc79f9e0195e48e7e57d16a963c683 +2024-05-07T10:56:59.963Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h4ad116961d1db21d0bf633e1c81e891fe56b7c0beaa4d888eb3f61195ae4c9f2 +2024-05-07T10:56:59.867Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he2f04344920330bcc6ef353a3dfbde0e4a97e03f9fd4fa26b923aa1bd95199da +2024-05-07T10:56:59.622Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hdf97385307f8c7275e05d6930d1102733d1bd3b2d014c1fad8be38c833aae898 +2024-05-07T10:56:59.557Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h34fca338d6e70842e9625a32c728f390f0d30566f7f45bf07e0eccfe7ea637be +2024-05-07T10:56:59.188Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf7fc470900dc67d8a63938f479d61a9b406f5a7dee73cca9c5ff44e00164aabd +2024-05-07T10:56:58.743Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/he597c4b00e4d27190e0dcbf18cc51abb0ec385be58f8f66c31e8bbcd067f042b +2024-05-07T10:56:58.700Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hb9301b7b4ef85761139ab5f1644e53059e0d0e0f278d3b9b7e1d80107c2ebf0f +2024-05-07T10:56:58.445Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h116e44e7661430ccd0c601bb55ae11cff7fb8c92585d43c032e1b3397bd3da08 +2024-05-07T10:56:58.438Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h43ac4468733be3a0bf91fd28f0f1ea19e825833873baa1b4aa57766012d5274b +2024-05-07T10:56:58.402Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/ha20b572d3e3a0c3ebf49ca8468b9b51335e7ed3871567d9c337014ea1badd652 +2024-05-07T10:56:58.225Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h6d1124c20b3a5d8c4cd28e6beabbc65ed747d68b5994b918c419f9a3827789ef +2024-05-07T10:56:58.216Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h432181dd6bd6dbf73c92d5d8184dd5d5540e55d6ae1dea5122b04556d435bc6a +2024-05-07T10:56:57.944Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h821f55e97c726780e57c77905321744ce6e1e1b35084a509404aa846c45dc752 +2024-05-07T10:56:57.217Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8db5c74fbb7bd4869571a655099f0ab369c438ec8c23dac2dcacdcf135cd64e9 +2024-05-07T10:56:57.057Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h26a83c08069b803b62dad5244b55ca0a36bd46b3759b75dd81997182a547935e +2024-05-07T10:56:56.993Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h6f78949a2005a88165754ad48e857aa42314c6d86a3b988c9af168a06bb98160 +2024-05-07T10:56:56.832Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hf0b34384b6f29249699460f8e1951be3a2ca4d5962c51ec470eb8899dfcb8818 +2024-05-07T10:56:56.493Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h916beffaf8151a45f45c18f70e928d9108276f55ee1a8b9d5b1be52adfd81470 +2024-05-07T10:56:56.246Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hf6938eb09cc6846504a26e1233484427d680733da0fb0d2a8a55b3eec3fbb4aa 
+2024-05-07T10:56:56.042Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hddd53e71cabebd1d0e41ecf16b07e7b3bea95919bff28240924dfa36e04930e7 +2024-05-07T10:56:55.571Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/hbe2413ece393aa200dc236c9dd1d67b3706c4f44c814955c659e076db4bc2f5e +2024-05-07T10:56:55.345Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h8bac472722450f9f89b2d983a21e779f97477b72db7d71ab108dfe1cccaae255 +2024-05-07T10:56:55.209Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/ha3c2930ce680d715907687b2a6f5eaebe3155b621c6bfb20392339c6b4c79d1f +2024-05-07T10:56:54.837Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h70c8bd78e74972fd1db2a117e0dac773146a8adfb9fa1cc7d1dbcaf0e01e04d0 +2024-05-07T10:56:54.273Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h2c1c700ea8de56e90dcfabe91d44fbb33eab6214c2cb1d3a549bec550ff89986 +2024-05-07T10:56:54.100Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3ef763b4e117cc5d1daafd0b0b3c19acdd92546a59eadf75abae544bd120c97d +2024-05-07T10:56:53.685Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/he6277adb744159455b0af2041dcead05110d04cdedff71ad1c2cccce37ad9f4e +2024-05-07T10:56:53.673Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h8084ce430655d9d55d6fa565628d785aa937aa7913e5e8da690881c4e7766f8b +2024-05-07T10:56:53.619Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hb0eed5222e73728dd6e3243e606b0bff3320e1f0ba457dad76402ef6ed83004f +2024-05-07T10:56:53.500Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/he27072ae296115163644e26dbefae3d14ab537d75b7ed8897b8357ffee35631b +2024-05-07T10:56:53.482Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h7af22e0a65c8949871f61f96aaea69a0d9e8e92cd752846076333483b6a0095a +2024-05-07T10:56:52.839Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hf0e9fc4536d4a1e216d2b23c43ac4582588fa82952c5e174dace8d6e44c66810 +2024-05-07T10:56:52.713Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/he574010b06c6ac95bea230769f5842e1a4d0ade1a6f83582c801e800a209ee93 +2024-05-07T10:56:52.688Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hcdd07b01a295b5c78017008fe944040f9474f0d105302fb5e501c82565ffbbb4 +2024-05-07T10:56:52.501Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hf63e73544418d5823eedcdd365dec7bcf53ce789ee1ef4fb4f88c6b4c1f1f568 +2024-05-07T10:56:52.202Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h7df1ac4c01aa4ab18517123fcfead3ae1bd6b0286575bb41e4090917cdcbb0b4 +2024-05-07T10:56:52.112Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hb2193eaa08e339e671c033defe1e237a278d7ffd8a9d951d55c4ccd980f56113 +2024-05-07T10:56:52.073Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h0689c0baac09e6ad289f0172384097653f389fce3de2f671ed69b2d90ea5a534 +2024-05-07T10:56:51.640Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb80f9e9d15e0ada6e1c31f3a527b40c1b9d46ec5c2cb6f023c6d309bf02181c4 +2024-05-07T10:56:51.627Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/h6fcbb2a0d54b0642b12c84661f6a593f95ebfe0d2c301e3be8445cec7e5235c0 +2024-05-07T10:56:51.056Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hcaf2b7cb06cebf2301c74a162ad03d22aadae4f57bba650b86d4a4769aff59ef +2024-05-07T10:56:50.771Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hbfa960d7274d8ab2c55dbbe59b0ba0229d68f21b7ecbb7e064e63a6bb0558e58 +2024-05-07T10:56:50.628Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h83e9d2f2ed9249ff8239bdc202781107258e9747ae9aa8cee1e2a41d95c16e62 +2024-05-07T10:56:50.522Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h9ffac7db237c94776c07a6d582b271ad08af31175f2d00f3b6e1bf94299d7236 +2024-05-07T10:56:50.259Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/haad076b908ba13296b9d75ff4f176e0c8a8aa6cc8ec0304ad5cf404b7e05418a +2024-05-07T10:56:50.040Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h2504e81df796f71459ce76a45155b41913e5c6f600d491ffa1a63adc3645b62a +2024-05-07T10:56:49.275Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hce4fed3a0aeb6390b055cf2b94187a1b95f5b72440c38c4ff0ae2c241e3fce26 +2024-05-07T10:56:48.991Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/had684b5327b2ff532dc59b2e9f1310e2a102c1c3c7dbe5712ff0c77bc5683152 +2024-05-07T10:56:48.982Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h35e2ac2a8d9d139e00d305296fca307f28604781a942b775f95cf07040e2ef16 +2024-05-07T10:56:48.943Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h3a3aa63f0eee88afae8e792ab682d633d88099f5f6dc3058bcda11a2b9128450 +2024-05-07T10:56:48.893Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/h254c8e3474b59bb2c5fb224763abf15f365ffcf24b5fff8574cd21794428d73e +2024-05-07T10:56:48.219Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb9602f9485bf19d3884e682f19555809811c38bcb2e6f8e82c1e08262c2ae369 +2024-05-07T10:56:47.960Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8f147dfc3bd9c19c853a0bb306417ad3957597c56ecfe6ef43f8b1fa5d98d08a +2024-05-07T10:56:47.472Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h59bc984346efbbdbc8d246443f6f656bba98f6a4b2b8b49e15c95cf1f96d48bd +2024-05-07T10:56:47.439Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h786d823a5af1321e558bea1db6bc17827ec4b60ac24030e07010c618f754eb4e +2024-05-07T10:56:47.313Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h7b25b9bbf47030fb3ac091e88ce84963c81629981beeec9f78bc7ffb1bbd746b +2024-05-07T10:56:47.298Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hd2f5510047f35eb268f2bb07cbbd1dd18053fd0bde50bd7c1777ec6ffb2d8b97 +2024-05-07T10:56:47.226Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h065a00dc4b71aeabbe11eda29adc7834fc1e19f0b587481c7ace31b1bbc7f134 +2024-05-07T10:56:46.818Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hba060331979eb0bf65b6ea5d8d7a44acb95e855d28de5c90917a7ee3b504b7ab +2024-05-07T10:56:46.612Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-east-0/login/h046a2ab4ce6142bd648fef42d453a2f571af6c8a40eab143a0fbb01e8b01d46c +2024-05-07T10:56:46.169Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hed127e4cb0670fb14235866e2584556c1a68df7e682900e53ae082799795f1a8 +2024-05-07T10:56:46.021Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hedbc5711ba756ef9b05aa7bcd1806f47222653cb9fd80db4b8d1165af2b34d8d +2024-05-07T10:56:45.904Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h116ac4bfdc39f75680c1083db803c370a0b5277d4ed9da1448f8111f9124f87e +2024-05-07T10:56:45.783Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/he6395b0c2fa2bed06d914e71831cdf155b11dd37affd85205cc5046c5dc7f7fd +2024-05-07T10:56:45.762Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/he968275cb0954aab36ed72bee6984f17042a915fe9e12ccb8167cc8d191504da +2024-05-07T10:56:45.731Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hbcce49201064c382d363492c325bb2d5cbe91ae49dd41d5cf5ebf1262a7893e3 +2024-05-07T10:56:45.380Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h308618c68d555b04c703ab54a6a2166f87b37c703fc9658e3c2fde045659d640 +2024-05-07T10:56:45.063Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h89d4c615be94e2cf27bf83b8537efa6475ab829fd257bcc06c44aa496f6a0c46 +2024-05-07T10:56:45.055Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h5278baffa781fe2367864aaa9cff679849d0f1fece90be0920e1c91058716427 +2024-05-07T10:56:44.884Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h2335618a83e416e75d2bc96c54ea4340fecca5a19d709d09c3e520ac93743e5d +2024-05-07T10:56:44.530Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h51a0dc7300b627358d1ee7d962bf0bcf56845cc583352d62a20b40ddb05a390e +2024-05-07T10:56:44.436Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hba22ca82c52aa1cd8ad95bc87b8a911c7373a7f0e680d61a70ccad0054236839 +2024-05-07T10:56:44.377Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hba98e49c099ac4d218f1ce298042f012365b7f00575a2503de16e1c06abe632a +2024-05-07T10:56:44.104Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h4562b19b4e01aee240cbbf2ed7e9e7b501d6b4b7acf4a0f672dd56fd6fb98bc0 +2024-05-07T10:56:44.038Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3bf46837b3fa7d8dd1c82772375f7560c82d4c726513fbf5caca25bc7005b3cc +2024-05-07T10:56:44.036Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h2767ec6c8111567ebc485fd2d3d781cb94ce7216365702f838506c5968f44f84 +2024-05-07T10:56:43.707Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h772c2d588bb01e7258b81edb8855e31d72fc061804dff0f3717f130e3d254454 +2024-05-07T10:56:42.913Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h12a7af976ab80fe2c660afc58cb474baa7c430e7582b3e2b0b4f724ab70341b0 +2024-05-07T10:56:42.831Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hbfc8188cb5efc72102ba2fecbf58b0158ff5432010817e95d1d413ec64d43353 +2024-05-07T10:56:42.777Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hab89dfc8e629a0f88c6a01b5c55ebb3e382ce4e60660bbeff6cac1a42744fc8e 
+2024-05-07T10:56:42.498Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h80f0f5b5ee34024eb13fb1485b81fd70673435908d2dfae1f0c9b500620fec49 +2024-05-07T10:56:42.332Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hc16dafbb71c424760b376e7bbd9c7f9101685559d7e99203dad2e8b8d5870af2 +2024-05-07T10:56:42.053Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hb59009da04c0fe34c1c711c2b1db08e501acb2604513c30fcc90275a3e5a5009 +2024-05-07T10:56:41.893Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h243a0b9869914afd40ad37ab181b98d44d088efd853113c66ce077c6bdc1d2bb +2024-05-07T10:56:41.609Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h6eac031e6b2e6eccd49b701090dc0dccfc23a30ff4b3c8d05096e33d888bbd08 +2024-05-07T10:56:41.309Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/haababec00ed9bb8aba7b43140b925afa10b0dfa591d2f54b9c5e88d47e06521d +2024-05-07T10:56:41.173Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hb59704b9f186b65e45d6610680439d90c800d803ad22aba9644e523ccdea4980 +2024-05-07T10:56:41.099Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha54fed3bb1879ae76de8c6b66300c326128067f00295373376daa67202d60324 +2024-05-07T10:56:41.093Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h81667fb58d3b6eeed89c5b0487666186a4ec5fc863659b519ed06c7feb9947ca +2024-05-07T10:56:41.042Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hc64936c5ad41398def5af8d8ba51465ce8a33bdbf3b581908defd0720c776ee0 +2024-05-07T10:56:40.918Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h072357caf5f1b8558d7c882d40b4053155cb8936e6fe37ad3d53536e01d48655 +2024-05-07T10:56:40.848Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hbd5ea754f78b4444fdd32953cd6206af22f52a7136e83f15a85d010810dec59f +2024-05-07T10:56:40.196Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h2c378b836a7ae9f80d52ecc1863bd169d193f11b433f9d8315aaf61f2232ff79 +2024-05-07T10:56:40.189Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h339db50907fcf56988d6513fe574f172edcf0e568c911ac773b1bfab7002e9b5 +2024-05-07T10:56:40.077Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h67dd7798865cc8486caafabafa609537403ac26950999264ac0e859a03dbb309 +2024-05-07T10:56:39.685Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hd4f8bfe3324fcc8f990988cf574b763a8294e29505a9eaed6c21867822e28078 +2024-05-07T10:56:39.626Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h2c5ba6dace185c688588bdfa9442f433088964a7e9da87de79715932642dccd6 +2024-05-07T10:56:39.487Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hfac0498bfe0706ddf10cdd701cd877260c51922d91f1d5a26e5e6b26354aef52 +2024-05-07T10:56:39.424Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hf7c21c737851959f2db83a63dcbcf6f2d4c1aeea07ba8bb42d1201f572a40c0c +2024-05-07T10:56:39.341Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h38449260aad440b5a8a4a1f07f6b18bf710a390f6183464295926d9e66de8891 +2024-05-07T10:56:38.882Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-1/login/h5129d6fd39c25bb0fada6432a16a3fb8a7de5ef0150f5f8e62d1756bc3cfafdf +2024-05-07T10:56:38.667Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hb44504bb7c6525a7714872ac11363e857c1e88e671d32e21df0faa98d4cda9f8 +2024-05-07T10:56:38.667Z [INFO] expiration: revoked lease: lease_id=auth/gcp/login/h4c031a99aa555040a0dd99864d828e946c6d4e31f4f5178757183def61f9d104 +2024-05-07T10:56:38.138Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h53f982e19a4c74a70af3604ec2063bc4af528892de197bfc8f07ffcd02d740a5 +2024-05-07T10:56:38.117Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hc816a50a1c69ccde2ab933addbd80e8235545ddf5eb7ac2f14e972578c258c26 +2024-05-07T10:56:37.970Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/ha6565fae4416d7d65b25cabe287f2fdec72a3f909467eb52a854d11814ca173a +2024-05-07T10:56:37.729Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h2d281dab5b8f2ada392f682fed0a39dfcbdaf5e531f6080b8fabb987550d8b9e +2024-05-07T10:56:37.596Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h4366427b5485c0e83e0a0b1fbe2268a79cee3cba9ff5c88586bb917b7e154ff3 +2024-05-07T10:56:37.337Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/he066b9e7c0e001ac58fd908e5eec7d4ef3c44b9e31aebff2ad311e7f3addfc06 +2024-05-07T10:56:36.770Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hf59962c6824b620c0f85c325802a1ad4e090946040fd18b680ff7cc8d152e4e3 +2024-05-07T10:56:36.591Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8c3e7a8ca026f0543fea2a51ada43f8cb21cda4bddfd04dfa56ee94d5cc19cf2 +2024-05-07T10:56:36.555Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hd56041737930ed2567935f91f239b22990ff20b6d5e9df349e83374d0f70b7d8 +2024-05-07T10:56:36.199Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/h3a20b1e44e288cd5fb0ff0d0c4e42a4ebad993c2d454ec7018dd454d8e006d62 +2024-05-07T10:56:36.144Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h8f4e3a34083f3e3b247b6113b3dfa5294f01966df3262385391b394b8baeaa12 +2024-05-07T10:56:35.344Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h93491be27afd5856047d3fa27bd9b343a59e8b315b1ba61773cfd1ed7c4d3a1b +2024-05-07T10:56:34.994Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/ha232dab8623ab1a1d1cad0434030de2535c3d1d7f139ee831f1b41753ad86782 +2024-05-07T10:56:34.981Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha720133e6307ec8727b61546509b84eeca20cc9e1b0be9022d36757d162f45e8 +2024-05-07T10:56:34.725Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h0e8f6378ea682e4a62456b1c036e99225abe374f14d485b6332d98acde804a1f +2024-05-07T10:56:34.325Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h7ab2a8847fcf6a140bbe079ff87ba4dd5b08300b09bca233ef50fd71a3c7d0bf +2024-05-07T10:56:34.051Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h967ebe0f501d655098d6d82be96eec195d745441b26c27ae569b320bab831cd6 +2024-05-07T10:56:33.790Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5132908bb7ef7f232b7e66d50ea5e93c3e7fd2a0b40dcb42307da08643ec8637 
+2024-05-07T10:56:33.546Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hb25360fd578960e361f1e8bbd113068b87287cce4870b9c22434cb4bd5b3bf6b +2024-05-07T10:56:33.470Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hfa26e0c12143145aa15eeb05f7527d4164b3d45f0a9bcfbf347c628620a30a98 +2024-05-07T10:56:33.175Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hf77c7471b51190c53e2ede05b8872e6fc55085472238b9921b1456cf590e2b1c +2024-05-07T10:56:33.063Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h59fbf67c14ee981a7dde1603ef500c4d881a76bb9a36c5abf841b8144295e211 +2024-05-07T10:56:32.799Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h35f8934544f210c27bd81840ae30268f6763b0bf289a8d7572e7d34c80d3ae2c +2024-05-07T10:56:32.682Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hc275d6d5b92cb186a462fc6d35a038b99ba5dcd938b7361e1a29cd4e2daf44ce +2024-05-07T10:56:32.424Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h61b03dfcba89db1744d62cddd7351d5b0137b2ff1e4399eecd78d713865078b0 +2024-05-07T10:56:31.984Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h1dd79f94c89e4ed3ee6c27e8a82154ccc57020ad6a68b0c131151667995aea47 +2024-05-07T10:56:31.870Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h66806bba53e309b606483400f726fd3409a495218eac70ad8682c9bcb7162e8f +2024-05-07T10:56:31.652Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hc377272903b2399874bf19cc1b49b8dacb6475aa563d963cb6bc78b69bf2ab51 +2024-05-07T10:56:31.616Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h23d737d97bdea5560f9562ea1cbccfd1f40fef426734a94c66e22d292098f447 +2024-05-07T10:56:31.168Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hf1c25cec868a74694df6154349ee3c4c2d2acf5225ebfe891b261da2dee43152 +2024-05-07T10:56:31.010Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h53186ebbcde1d91dc800dafb7249fb154cd5d146c5877e70a859a898972d2283 +2024-05-07T10:56:30.947Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/hda963da2114a3c4176a5230cf26b32926d21d122e06b8fb5cdcb74aceced5ab9 +2024-05-07T10:56:30.806Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf89c4b1988db9f29b6bda0332a275b53f9ad0c0b88f84fc7fef0fd472863a12e +2024-05-07T10:56:30.461Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h77263812d7ad58bac57d3faaf39f9ff7bf28b7c96cb8af643a1c11ba00d49ad7 +2024-05-07T10:56:30.253Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h61644d14dc1dced55d432a39cd3573344138215a7a837f5bbbe2276f073c2f23 +2024-05-07T10:56:30.070Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h119dc54762fc55b6277c9f73ca8c7e12fd3c8dbcec73a73bff2792c1a8cd79ab +2024-05-07T10:56:29.835Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8e6f443275ee17733ca5435a155c5cf7107f28a861fbf45df1a367b8ef80b37d +2024-05-07T10:56:29.522Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h1403854437fbefd03adb6176b2e204e205d71cd880db3b87c112fae7724a69ca +2024-05-07T10:56:29.342Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-east-0/login/h8205af09640e908b47fb1e7efb9b50c56a8195eaf3cc3a8e97d2ca3fd15facf7 +2024-05-07T10:56:29.260Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/he685e066325ecb9ac2e34d3577c42f10e40cacc97540e6e6f02e812a1ba86212 +2024-05-07T10:56:29.054Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8114d77b4d45e3996c085f1370e010d83826ff297abd65142981463bce7c881c +2024-05-07T10:56:28.849Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h66835ba7deaa091b8721166ccc0dbd6976e6b2302c96d764d6cbf620e2c5a4fe +2024-05-07T10:56:28.773Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hf200ec739cd35fdbe4292936c6ea68709834aabad2cac8841c6c0c3f506b273d +2024-05-07T10:56:28.555Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h91207831677be02ea825cae293ec5b8b3d4e8af5dbda2c9b570e1a20db9bbc3a +2024-05-07T10:56:28.543Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/he4cd7b9b55f6ac463b70cc75f8c81868ca5f3878a69b9344033350949c087170 +2024-05-07T10:56:28.086Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h2de1f0b8bead422b566db78656fb529f8711d248a8e22ce864737c3bc71cbc7a +2024-05-07T10:56:28.057Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h170e56585c250f84000231c6dc4f6712a42fd346c93fc352ad0c023622f03d27 +2024-05-07T10:56:27.965Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h783360d7ec8aebe518d06bb6749cf7332d76abb05a71d85a06a22c167d802ed2 +2024-05-07T10:56:27.808Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3d2d0a8f460c0e666249de47fb3abe44f81835167f90db482ea6cd8921ffb3e2 +2024-05-07T10:56:27.500Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h4acf67df0fadfbda7e01917cd3d9e8d374a309a8dde5ed5e94f7bfc27f2ed65c +2024-05-07T10:56:27.116Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hee3e908575e8edda14041b8ed3bac5c261d5c6a457f6b64c32ff16a4724c3b1b +2024-05-07T10:56:27.014Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h103f2f75a855a81f8e20bbd5dcccd177243c2f9e352b46df7c05e7d2b0051ca8 +2024-05-07T10:56:26.888Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h07bd44284764cd876afefb30cbfc464f6f5bd0a2513b1d67758a4a734d4cb11e +2024-05-07T10:56:26.684Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h427f1acf46b1c143b5b0d62efece9609c1b65d1255fea418fbd13a11d31a21a5 +2024-05-07T10:56:26.621Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/haec98e0c48a85e91c4b236bda59e7be5bb4fc5795bfdfa219f5277329f6909fa +2024-05-07T10:56:26.422Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h0caf64977668a36586c240a9ef8c1595e4f16f81c0fdc1b050be0214f594d3fb +2024-05-07T10:56:25.895Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h0dc3280ce5f007ad5831e63aeba094d05c0ade2193c8fcc18cf8fffa189158be +2024-05-07T10:56:25.817Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf06927df7fa38cb71f1990be1b4f453e1d81738ee86ffca6dcc6abefb242fb74 +2024-05-07T10:56:25.646Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-east-0/login/h605dbee1b1ae8d8c05dfafbd552bac296c943e767dd35f9e0e02d26d23ee7eaf +2024-05-07T10:56:25.412Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h0245817f6339f4da61094ae76d545eebfbfd429af99074c1a04fced7c5433297 +2024-05-07T10:56:25.089Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h1a5aad38909c5c600dde829837c93d25a76953c558e1bb6cc1c42f6c1ed66575 +2024-05-07T10:56:25.001Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h2f77ec24977ce9b9d12517833426a4a89844d85e2f24444e8a80946b41bdfb54 +2024-05-07T10:56:24.991Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h12daf180d69a4a45970e0aa7a9c04fc9ae9f9dafc30a8dcf1559b05dd41820f3 +2024-05-07T10:56:24.901Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h78efa14575d3c95c28f260a3ee976326b7d06539bb48c137b3e8b56499ed0e49 +2024-05-07T10:56:24.542Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hca47af8da864bb0a89857994c58d60a4f29a0f2a4658c5daa9708087e0d8bde9 +2024-05-07T10:56:24.356Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h28a4d915cf7a75c35f886c0929887d31129b8c69d2e7d158c1e784a16bb14ab2 +2024-05-07T10:56:24.201Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h6406b7e5aea085d09a40083eaf725cf29f3634df9f506f0e965c2ad37ff9a7e4 +2024-05-07T10:56:24.100Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/he8bbadc0281ae65ac510188bedda565a18675d9dc7d7c5b102c108a56352ee01 +2024-05-07T10:56:24.032Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h039b01ae969a5d4f7ec9e06df002e26fb3416ea90949ed3cd71633f25292c44b +2024-05-07T10:56:23.821Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/ha5e326e8f84212cafad480a726c24d062263e9538128b6d8ff9929ba1bc34ab5 +2024-05-07T10:56:23.414Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h3b9c19ca3aa85f536141513b457c32139ffc521985bf05a62087170b30a6a550 +2024-05-07T10:56:23.085Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/ha64e6b88ce7e0dbac19e366b519a4015e20bc69d4c75207d8fb268e31a0bc876 +2024-05-07T10:56:22.860Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hd594dad77a54e8f5c78dbaccbc26f8a9d7d3946c5cff40f52e65f13082c12f82 +2024-05-07T10:56:22.707Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/ha0d22358f64cc57f68a3c38527ed301d8847adef7be24b223e8e8729b8cd7438 +2024-05-07T10:56:22.338Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hefd4e44fad74ee71b3733c452233064862ef45c915637560898b31a4887fc5d0 +2024-05-07T10:56:22.305Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h72b4deba8edb08b5aed95a18451a681c650da603db7504211a67a8cac66e526f +2024-05-07T10:56:22.275Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h042c3a83918d3f30a05037b352a40795365f859b545b5b162fd1a7215fc0e9a3 +2024-05-07T10:56:22.230Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hdb27b6908584211834feb636da909732327d698606118f8ff82ff5958686fba4 +2024-05-07T10:56:22.042Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h373c0898d511dbfac1ef4a1d07ab1a37ffded783bbe6bacae14b3e7916f9787c 
+2024-05-07T10:56:21.508Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h3a98525c905e691a68d0c13310db4f5f31efc8df488b89504424f6021761571e +2024-05-07T10:56:21.429Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h80d39ea6379689b18b68c6a34d1839103a16bcd59ad38672cc0233c1c6f836e9 +2024-05-07T10:56:21.302Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h1d460a58fb22460f9897b43f7ed09bc0e31c52b86230813116c5597453041440 +2024-05-07T10:56:21.231Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hadf0fb412cc0f67a0ff1f39e62804923c160e6466e4eeb4bcce6441e395787b6 +2024-05-07T10:56:20.634Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hcf2a20e37d65ae91cd4fba60354aadfbd75642edb9a4b3651aefcc362f6626ef +2024-05-07T10:56:20.561Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h429a6e619ac1371fab609927991f93ab191989b3c8d1a51d5b6883ec56c5b6dc +2024-05-07T10:56:20.538Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h6f4dd876412c95d0b333922a4601d8a3208b7f24bfa8955a529942d92a6a7324 +2024-05-07T10:56:20.512Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hec6b0b46b55cb0d5f5f92626fa970aae50e9e440879009ff021ae72bea738ad7 +2024-05-07T10:56:19.880Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h6194c7c32a73203f86ee920c09ef84174f198aad306d46583fd73ba330dc3061 +2024-05-07T10:56:19.850Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hceb54c683ffc6d86e65bae76e020d536f292620e8751813eef25a716961ed8e2 +2024-05-07T10:56:19.699Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h65c032caeca9b72f89bcf1d7774a2e7d935c99f691f731569c3febd938f75303 +2024-05-07T10:56:19.500Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hf35ce799e008ae11d4865d70092a5de471ca059cd927f7f5eeefc034d58bf56f +2024-05-07T10:56:19.240Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/h2021d31160147d3a2ad00fb9f38462646603bc6d97d13eea9621c3b2f94e6330 +2024-05-07T10:56:19.088Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h32c829855713d77e6a065e6fc33f71f89041fc30d164ac31b5dd525c8588b70f +2024-05-07T10:56:18.791Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h1c29838dfa0f835c05c9a6c403ae7df470ea4545c2030e183de1089a3311dd8f +2024-05-07T10:56:18.669Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h50105f1150ac81bc7c5ee694172e75531d235909555a0075b9f30e8f7780b057 +2024-05-07T10:56:18.437Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/haa2c103e289e8231314cf1660b7e33755b7aee2a498ef0e6b42fda97e605b890 +2024-05-07T10:56:18.259Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/ha7051159532f6cdd91ed278a3da98c7f16fc0abad15de966e9266ed1a697504c +2024-05-07T10:56:18.246Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hd140cef8cde10f3abe50b019d618feb0a80ca4eab30fda2058035fe65f20267c +2024-05-07T10:56:18.113Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hc54225d41ac34230b51a3cbc7accff088eb7aecc426aa4185088d02a2a211dde +2024-05-07T10:56:17.942Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-eu-west-3/login/h32c8f0ee3cc5cc56a8b0138f4ef4f820b12afa13d587345e63ac171f5bbab6b2 +2024-05-07T10:56:17.875Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hfe39b46d00f6781615fc74caa8234b3b60a2ab4125a1acde916c795ad2cf5bf1 +2024-05-07T10:56:17.543Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h4211fe014955806f6ae776b7d0266d1644da69a1d424ef397392d7ad50b98ac9 +2024-05-07T10:56:17.304Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h600e58f17fbcbe3adf331366c3db6e3163bc8c89bd40eb6ef93243e0fc8b97e0 +2024-05-07T10:56:17.280Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-aws-paris/login/ha2deace272a3d0892f2bc21bd354aea436c111510704e4fa847b940515d806b3 +2024-05-07T10:56:17.037Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h937e2a7078c72befa9e765b3d93b677531128c0f0a82fa8a2b29cd61048a4205 +2024-05-07T10:56:17.029Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h714709b9f907771b2c84a5e55940fdf79a26bb82c15b495dbc2c3e5b938cf4de +2024-05-07T10:56:16.402Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h0a23439368e04d88a01d37647bc4603057b3589b91b49fa31aa3b6e749847a4a +2024-05-07T10:56:16.246Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h848ed0a8923b0cfec248b80c632b257e5ce227c83f8c99e93fab5b145b4a90e1 +2024-05-07T10:56:15.990Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hde99cb2c6af50559e169ec79222e5210e14eaa6a676b9ba1d5a8d89d53d8d352 +2024-05-07T10:56:15.835Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3c61e0f3d395078386bb5315f04d7cbc8a9568d55f2ede5a618425e25b383a99 +2024-05-07T10:56:15.611Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h5de74b21da53d39f0df231dee991968c74a5852cbeb9a1a22e770d5970c4b8d8 +2024-05-07T10:56:15.450Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h907b1bb06b0c3fa66e2c477bd10fc0cdd3c21d09e9900ae14d12be16edb6a5a5 +2024-05-07T10:56:15.039Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/h08f306062b1089b912b90243e42d04b1751e79f840d4fcde8eee26bf4c90a092 +2024-05-07T10:56:15.018Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8ab935af6263cabd3fed4e2e0287f6955c08106997e155c88ebfdf0d897303a8 +2024-05-07T10:56:14.949Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h91320d4ed57c6a4610d53da0f2fe40a6a36d64589d99dabf0c91def7ad77d56f +2024-05-07T10:56:14.649Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/ha5aae89bb1f9d78dfa6dcf7a756932106affa45cf7de5a00b7dbb8e6fe21a27b +2024-05-07T10:56:14.510Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/ha06a57b9e2e28fb795ed81f5527139d0388b89cd3d18ce0e39f9a6544d21b1d9 +2024-05-07T10:56:14.450Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hcdeeaa19ef45ce082e20249c865bc4a933c82d1bc471a74855e185066a31fa89 +2024-05-07T10:56:13.885Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h267a2cd3b96ae60d1b85659d25ccd5ded62147c6cea2e196d331b4ee5160b28f +2024-05-07T10:56:13.815Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h576417ddd07c7847b8550e5963f4ef80354dcd54c29f681942c2661b4a241066 
+2024-05-07T10:56:13.697Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/h8970261379c358b60d5ee25cd7443ba4fb271f25c1aee2a519dfe80900a30e22 +2024-05-07T10:56:13.593Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hfe9875f478c288a1cdf965b5eac1ddb79bce40e51c5be1206bfeb89a29123335 +2024-05-07T10:56:13.438Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h6c2fe29f62dcf8f00d4774e964311dc6dab58ff66fe3998e52c31424d83bb9c3 +2024-05-07T10:56:13.280Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h72d12de600c6866213691bff0e2fa84dca47667a1f250cb3c23c2b99978942be +2024-05-07T10:56:13.268Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/ha07ac95c3543771d90d2b9b070ed9f0d680fff6362e82058f42b1a567aaf8921 +2024-05-07T10:56:12.884Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/he43a3c0cbf6d2013a509d982cb5892c4727c5538e2291dc2646d66b21e097331 +2024-05-07T10:56:12.754Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hdbd8ac3d95d8c8a72b7a450ee670f1ed92beb6d968aabb96ca179778bf449e5f +2024-05-07T10:56:12.454Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he96ec11890f253d73b7eaf843fb6a9f662c5b7e40e00959e93b52ea5bc9ffd47 +2024-05-07T10:56:12.147Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h0034bc9f67f8cf0e714607b4a8a68c80cfb75f9f9f3eb21dccede44d0930ca5c +2024-05-07T10:56:12.016Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hd14e18c035d3ac513fb4b6ff0a2e0c5662c7591632658d8b521d113b66bb48dd +2024-05-07T10:56:11.761Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hec0a387bcfbf41ce5387d99f11684fd7cde336a9bc1fecc9b60dc531518c44d8 +2024-05-07T10:56:11.620Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h82f80c248b2f3d11bdcd0baa929084f7a7a4d9d7f5f4021a7c7aed82baa640f6 +2024-05-07T10:56:11.505Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/h9c30ca301e6d5a4cca26792be60db83f0b69531612299a477dc9558ae2fd7de7 +2024-05-07T10:56:11.308Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h2709a1f5a8920908f51dac600b5718b7e0c72fb535ab2083946fea091d8828c8 +2024-05-07T10:56:11.256Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h0be466e3e0526f0a206eab6f6aab1af9581a9b988dfd7e70896830c103d495c1 +2024-05-07T10:56:11.213Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h4fc28f3440c33a0bc25cd7a23ca526713e7fc9d14ab3371f6af3ff307ecee2ec +2024-05-07T10:56:11.208Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h7cdbaa9a39badb86cf9f9b277ad82b48fc252197a86eae8cb6181f1dfef82fc2 +2024-05-07T10:56:11.004Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h6f609ba47fd308ece397d9ff0f2749f64e1a4ec991e86f0d5e950e6662393d28 +2024-05-07T10:56:10.698Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h93dc7ca384c5d7e4e8a778e8e4f44abfbdff319ef5f7fe82518fbb9aeefd5496 +2024-05-07T10:56:10.450Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hf7a8d4f87fce5a57a93c56385fb38b1021e4eeba6843d825ae73fc3741a30bec +2024-05-07T10:56:10.387Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-east-0/login/h2e669b38ef4c5c5e612a03e22cdbd682c59fe3919babbb6320e8fd9183458163 +2024-05-07T10:56:10.216Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h4512b6a111c69532e63670ea30b89c41ea35d26de470d992b3c7e03b59238227 +2024-05-07T10:56:09.566Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h06ecb413577843a57cfbe59298393e956d7f8a5aa7982e796c265d7d980b6e94 +2024-05-07T10:56:09.488Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hc89c7878bb37698c6e952056778423e9101c86be339422f5432c28aaebd6003f +2024-05-07T10:56:09.328Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hee058f4ef15091340e4143a5a92c7d048082bf51926c433c887f7eda7012cd50 +2024-05-07T10:56:09.267Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5daba312a47d0c94ac5c067480c6aa614df4fc97eaf85041871a9c05037dfb87 +2024-05-07T10:56:09.206Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/ha30a12e4b22f7b103e928990467efa107d5078dd4fed0a9627022a14c3fe569a +2024-05-07T10:56:09.103Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/ha601681a6b9dda7c15505ec4793d9409ebcc53cd29e61ab5ad82df7ad03434ce +2024-05-07T10:56:09.003Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h3cde8e9f0c8d468802a1805df741b2062b87e72c624677a4c4af0ffabe1abcc6 +2024-05-07T10:56:08.858Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h9ce62cd68f7257e37570e51d1af7df764df46685eeb681c2dcafe8837ef7a83d +2024-05-07T10:56:08.739Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h33394d0f6a58fe280b9d9962a0d31b425a7b73a237ce6ff40acddb3b25c557f5 +2024-05-07T10:56:08.294Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he16f8e964c2d4c4909938c7c36261316e4cbcca6c79acc336bc63f0eb5707913 +2024-05-07T10:56:08.236Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hd827de108bc4c8ac49f2d79b6a86ad5adc212bd480f1a920cd9e85cf145848f9 +2024-05-07T10:56:08.042Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hea827c174749eeb39c7ec738779ae38108c22b6e4aad26a428ca040adba5e5e5 +2024-05-07T10:56:07.763Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/hd714009da0fa78fa70fd9599b59f766ba663cb8263213dcf89fd5ffe318296cb +2024-05-07T10:56:07.632Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h2f14af7d8f4d3913cddff7195afeee61f3da524ef240a51befef1cd47f7af709 +2024-05-07T10:56:07.468Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h411e1cdc01697ad3cebc4265be639dc243e4e35be9429a9d6d31e49b8912e140 +2024-05-07T10:56:07.440Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf4db945b045aa4bc2fc6a785e28e9c99458345f3f1938a44a6b2ac7b9177827a +2024-05-07T10:56:07.268Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h0152c477f15d103ba732864d6559c819fbf2b4b22fbc19ef0d28b87e47a47297 +2024-05-07T10:56:06.556Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h36beddb8526bd45080ab1006b14476c57d0527afb845bb1af4d59208cc141567 +2024-05-07T10:56:06.456Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/hb9009bb04225798d09fef6c984dbc14bae2670f04cb0bb64223dc6115526bc1a +2024-05-07T10:56:06.353Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/h10912c1ab8189f87adb04dd1bd399ba06ab292d349e878d26f3c19fd52a11ff6 +2024-05-07T10:56:06.196Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h19b50c7a421a2a41ae995890c403527fb0d06af3dd14c646a1c33ff48293e947 +2024-05-07T10:56:06.121Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h095d023d7012cbc4ebbe99cf729366a3ce74bb476e359b784b3ef250d0799a02 +2024-05-07T10:56:05.959Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h667a290515d79cdb02f59cb0e9866ee93e6060d7417d4960d8d42ee580c2b666 +2024-05-07T10:56:05.818Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h98f1ce6ed72d7db18f6e897c5d64e0baeacd0f5c9391c41323115137a815e8c5 +2024-05-07T10:56:05.759Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/ha24b7112d2a0916e8e2f651626e136f74224b29f7922a6cefde47d5fbf7c6d4c +2024-05-07T10:56:05.681Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h1e8a002b1986879781bd5f05ed246ddab23b961212baa47328f5c1f3431bca1e +2024-05-07T10:56:05.604Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h11855596dbfc07d83d38aa567e3c5e1a0346bd9353865ce5a787324e57858419 +2024-05-07T10:56:05.108Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h89ce0a19603f9c2f02ef5c8304dd20ad8d5e050c1cce71878372a9b7d881ba26 +2024-05-07T10:56:05.071Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hc5af7fe8a5550c8d7f97cc1acbb658996752bfeecfc387663bd98a79125f1d55 +2024-05-07T10:56:05.060Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h59a33aa59f379843642a07f52e4326c36f8e153395bed12aff8557e434be8c85 +2024-05-07T10:56:04.815Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hc7d4562fd0e68f94dbfcf7ca1b7d6e89638ed0d4d78d094a57bfa65c742336a3 +2024-05-07T10:56:04.747Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hadc36864391391e336beb65e0ad4796ad6586f71c0399a92bdc51d9dfc9b1220 +2024-05-07T10:56:04.416Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/he9e15422b89af32ab6acbd77d9a975d2c578baac0202d487992183ba2537bc46 +2024-05-07T10:56:04.311Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hd39b6a5fcf4992d5ad0ff5012e62b815cb241edaf7863bfdb740c1b58c631d95 +2024-05-07T10:56:04.197Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h70576da7976f06df4a053a265d72b3c41d37885e5ce220627d18280aa38ca43d +2024-05-07T10:56:04.055Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5845c2f5f56753c77da90a71a9c2443b21beaf1d0d78bce9f2efce9aed0a8a11 +2024-05-07T10:56:03.553Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h7fb69c60c29c505c5e84769125ecad6382bbb98ca2778289e2b2106fa1fda809 +2024-05-07T10:56:03.483Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/he0de9700c9257de48f98d4cef95c3cd122615d3674238ea891e9aa3f36796564 +2024-05-07T10:56:03.078Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/h2131cf7d8d3c300c02fd2b1e13f35114933d9568d2ebb1bbb2df31c027eebcf6 +2024-05-07T10:56:02.796Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h72302e43ab8dc2c4efe923f8f6ef0df42154815df1a8c048917dafd6f3f9e89f +2024-05-07T10:56:02.718Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/he6c993c9185377513b0d2defc7f00e1aa5aa1ed51d1cc87b5a8893dde3bba59b +2024-05-07T10:56:02.606Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h33a2b4f558e055e54506d4dc2cac301836150be253dc5b3ee3eee98da6b7a166 +2024-05-07T10:56:02.411Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/he0a711cbf0824c471896c4ec7f9f72d8a527a7cff0bc45821f5255fcdad0b853 +2024-05-07T10:56:02.279Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hd883e4407723c2eb4d9f1c71b2b9a06bef835f8efede1695c255947e9c21e071 +2024-05-07T10:56:02.234Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hd0c5693cb661f4307de69106677e75f3fc1bc435c5b552248949fdb162c18b75 +2024-05-07T10:56:01.916Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h0b982f3ad64c2dcdb75b19c489b695815c22bf7b6a68a8021c2471e68d66fb7f +2024-05-07T10:56:01.849Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h83fa300c5bf320c42584ef9f3237940b607062f9e70fdb2200bacbb3fdb66e1b +2024-05-07T10:56:01.627Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hea7f7d29229ae5f2221a548e457bf62b7c6fe0b853e0ec761ea3d1f6c1a78dc5 +2024-05-07T10:56:01.300Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/hd2d84946362ac284e94879bf0c207d489e31bed2ac84f1a5c5ab2fc770890010 +2024-05-07T10:56:01.104Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h67bac90173106de8177b387bf5971a335579f7ece7262015fac2c2a631fe650f +2024-05-07T10:56:01.067Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h29c1e8b4368f6f7c79b195b372f56f90ccf19b06c934f68ff891ea15d50024bf +2024-05-07T10:56:01.056Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hd0b206a3ddb5accc76ed13d1ab442aa09173160361d1ebdc2c36144d4094b2ae +2024-05-07T10:56:00.618Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hc351bd93fe96831c23ed44f9e4dabd0064bc73dc0f47934355482e1fa13d0cbb +2024-05-07T10:56:00.146Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h447115655867eb3d34932c841d1f6ee576e6e66f6e985bb136d5154cf79dd1d0 +2024-05-07T10:55:59.954Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h9941a8e2365bdf2143f518625258870ee3a2d53e8bb206e74716c206140006fd +2024-05-07T10:55:59.591Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h3c9ec1b7314a7392c38cb7bb04bf3cf8a97ae6d91c6423823c8d4aff2e5c5d0d +2024-05-07T10:55:59.446Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h19b5bbe167142515ffecc69132f844f7a234599d756ae4fce61cf8b62e1aa4de +2024-05-07T10:55:59.435Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/hda45792a3e9898ab01e34bb71f03ef8374065095bf7fefe1462735f34cfb0f9c +2024-05-07T10:55:59.086Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-east-0/login/h0c01e98bfd64e06822490439a69fa56a143766d3e03519bb8014b0e4454ecf32 +2024-05-07T10:55:58.668Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h966f184d3e139204d71166d057d87ac337f9dd90d838485453b0a5e84bb5b5b4 +2024-05-07T10:55:58.476Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/hc6fa85666ac1155c0e4f56a8f42ce14ff6c579ccd70b6d7b7b409bff5eff1b48 +2024-05-07T10:55:58.236Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h345e367bae7bfa4dd4faf37fd63ec6fadb0685a7ef5544595f3a49f2907bf6f5 +2024-05-07T10:55:57.959Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hbef3d3a1b9e50c16cdbe990c1aff119f34136f9be5572be4a7616f64aea15531 +2024-05-07T10:55:57.799Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h640776a0cabb64223ea72785617900a0c96cb1705cef10c85e622339703e7817 +2024-05-07T10:55:57.345Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hf8ed73a8142d9a7b7207d006f1c645dc9f5b95f503cca39ce46afada1a99b3a5 +2024-05-07T10:55:57.061Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h76d771ef5f8c45dd3bc2784d7e9512cb69835151d6984865dba3a9341e2924b6 +2024-05-07T10:55:56.663Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h0e30f9d6da8f95d6f10fc2f1983aa2aa5027ca669061d845e14ef55ab31cd5f2 +2024-05-07T10:55:56.601Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h235b7fa509aa5c34df93ae47727bf536e6e1fcd854ba4ec45881b7141a5c6f64 +2024-05-07T10:55:56.310Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/haee74542fb16de4d0439425df626c6e8f00431bce285c5f19cb31d6ad8d8e1be +2024-05-07T10:55:56.120Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h80501da728acf3fa9ef6de9c9b222279c5217c5ad8e3be51147015cd6487f88d +2024-05-07T10:55:55.869Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h2c7a5e361228038e66a64831d637521fc1b1df231c7e75031481b20459369b46 +2024-05-07T10:55:55.823Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h7b705735a903104d52a36f98f2099f7769003ebddd5551b64b5c3b4cc31c29ce +2024-05-07T10:55:55.739Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/ha8d761698753b697906c5deff265cffa44aa75fac2fd4ee8a1ef6b9e5da25ed7 +2024-05-07T10:55:55.515Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hfc7fb7494521e579ecb83edab942e4db5137c41dea1d1f41e20da03167bd780a +2024-05-07T10:55:55.281Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h745dcbb9efe4636c6d5414931f19415c096f0bc8acc4dfac4faa5468c7327774 +2024-05-07T10:55:55.104Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h35ac167ae21281c33dc1e0a1e5ee0a9433b5e0f346fad6b9d1f5522676269537 +2024-05-07T10:55:54.516Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hac41d1c9265f2832f74691f89c5caea55efa5a8b68658b2ec11a8fe955996f68 +2024-05-07T10:55:54.410Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h6b34e708452561f5a52c5c89dc3f37461dfdd8aba26dfd3239b854b3e4251428 +2024-05-07T10:55:54.319Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/pop-dev-do-newyork/login/h84d24010245e8c22970b8fe2ecf4e508f2f07b56511172f12b80e6623e46d10d +2024-05-07T10:55:54.037Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h828d27b870885deebd3bb19252bed3451a0b9cd523c1383f32f0ce8e4be47479 +2024-05-07T10:55:53.968Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h15d09449b3def23de14f1df371057c5e2c66828d6b8f88544326fca9825fc600 +2024-05-07T10:55:53.801Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h579d826fb1dec35e1a87c7dc559950a4698dd3b9cab8034655ee994e7644ede9 +2024-05-07T10:55:53.734Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hfd0a744cc14a92051b139b3233d476e21775e0b263ae3f833fdfd4591729dd92 +2024-05-07T10:55:53.366Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h1f9dcc5a5c460f3aa47480e3598e1e3eb74b27a98c7fbd47dc48a3905f023347 +2024-05-07T10:55:52.827Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hc4cce2e082133a6e24e4dfa1ef9a766031aa8b428b3435a368a1dc9a1a82bf34 +2024-05-07T10:55:52.462Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h7f043a09138060d339cf0d52384bd4bd1e9ed489b6d88ba1142658390eb0f187 +2024-05-07T10:55:52.445Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h007ac473c0aa693788b5ea9949eece148e7111278f4ebee9646b78d35ae0ef68 +2024-05-07T10:55:52.050Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h299c7bb7cdad21ab55306ac6883efff368c8dd3355953ce8c37648f9ee9c9f0e +2024-05-07T10:55:52.028Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h4ef01421ac3bcdd595b5053e0cea163ce0a6abcb31ce996dcfdc9c752ed407c5 +2024-05-07T10:55:51.988Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hf7489262fa67ebf2d399dcce38aa525100f2f063b23b125ca7bd7ce9197da707 +2024-05-07T10:55:51.930Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hb9b3bcff221c529b515c43f08568a5f23466d32044e812e04c0194643cd2b77c +2024-05-07T10:55:51.923Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h8745ba016d130a69a23fcc2de9b18d079b5a002898d16fac527c840e76f3fc78 +2024-05-07T10:55:51.899Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hb7d2a05ea65438ab8ea13985938d5a42d747db0fc54b93e91d28a3653da50077 +2024-05-07T10:55:51.501Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h69cde16b0cf63936b7ec8c5d8c9e7dfd7a385719bfb15d64a5fffeb0ab982e51 +2024-05-07T10:55:51.062Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h5359e030f4927ff64bb584df4ca13736005e8f0d1646e0d0a67b447dbfebfe10 +2024-05-07T10:55:51.010Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h825453d928bbece1584a6836c4afabe269f7bc0f5303e942a92310987119060f +2024-05-07T10:55:50.940Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/ha9797f17dcad85d8eba095fecba8b3f91bc493dbb9473e822768373a1b91b4fc +2024-05-07T10:55:50.668Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb8fe6040371a1c1bde2ecd3751b17917a990dbc8f40d11f90ca9531ba916aeff +2024-05-07T10:55:50.654Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h22c0e693aa701087d63ad7dff48610dab3dcb1c9691acddd5d8283de6920959d 
+2024-05-07T10:55:50.454Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h3685651a916f0600a15ee0a293d85c342fb1417d7413f3e68a107d946c457e49 +2024-05-07T10:55:50.409Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h8e70cde2140c3ff3003f27a377fd12bddb8048f4a8cf594b0cb5cc8e76183124 +2024-05-07T10:55:50.136Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hbac0a32e2219df10982d44ff085a300366bd2997325577d72807f21deee1dc5f +2024-05-07T10:55:49.938Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h49fb83c71a12e3ff9c6833e5b7cb820405a9f754889b9d0f2f6f8b21feabe52d +2024-05-07T10:55:49.853Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h7f87faf18fd9e9d978cefb137d09e81101eec2147ccfe0f59ef271979168878a +2024-05-07T10:55:49.641Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha8c49869a31d77c6e5aa7c52d036c93ffef1af76504460950b940e511c0e6f2d +2024-05-07T10:55:49.391Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hef3a8100298e6ae3257a270b4c8b4e9b798b33b925255c44aa2b581874dd5d68 +2024-05-07T10:55:48.981Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h82acdbf514cee0fd369c66b91a3a8c22baf1c0719c8adf639b0f8e4ac8570534 +2024-05-07T10:55:48.915Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5c6fdd3e8c24443c9113ce865cee9fed13525fc07757282e25469b67b85b0493 +2024-05-07T10:55:48.836Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h1b29cc569dbe2c2acbb1f680884314538b7c8ba83ef4f8068b02880a62ecdfa4 +2024-05-07T10:55:48.802Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h0ae35feb1b175f77b55d2e8c7421f9c69c142b06126d6a3d4e8e57d558afbe99 +2024-05-07T10:55:48.639Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h6910dda9ee993ba902458d312acd7a31cb979c90c7e06519969698a592684a75 +2024-05-07T10:55:48.064Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h0de88503bd29725769700b1f4edc88687879242798b8d5a274fe5bdd82ff8ffd +2024-05-07T10:55:48.047Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hd82d539e22ed88eec8e923f5567b389a8d389b509e4a8e255c24a5a4e1c4e6c9 +2024-05-07T10:55:47.785Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h649509dfd1a8dc625d9fe900585f6857c96d3f8139b7d7a723e1b2337d7d9d25 +2024-05-07T10:55:47.158Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h8a5d9bcec477716af89b34d479ce3372cf1def3fda9815c431f67df65c0f4737 +2024-05-07T10:55:47.050Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h63be2a19644bed8d0c777804a1588cef409dbe2c00f9391c00f20849141c0942 +2024-05-07T10:55:47.032Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h539040bf5294ae3ac791108a84f022840a9537ebb50d592607f980b1fcc48f49 +2024-05-07T10:55:46.373Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h2819bb5597fdfd432bc4478302b488d8d0e360180e279eb75c3d9850b6dbe6df +2024-05-07T10:55:46.224Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hfa3e04b9be8aa8f99c7d80b91c96fc073922348a3fcecb29b3df65f8348196e3 +2024-05-07T10:55:46.212Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/hd90048d216438ede4f8f935cd748cfb206bce5c2551ebeefca70f4f29adf715a +2024-05-07T10:55:45.621Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hf74aa71ef6e095b822206fce0ae6f0ce3426ac87ec475adf8db8e09974ca9108 +2024-05-07T10:55:45.599Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hde067fb55bed8fd93b9c53f88c9f97c892ffe8305f0fbeeb6a1f22246f65bcb7 +2024-05-07T10:55:45.569Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha0af803ef30581f801f4408bc5b53cc9e16596de6b6603a544e96c5de45db1ba +2024-05-07T10:55:45.213Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h04f4e900abe680194925fb3e76bdf789670d78988a1dc0ff3ab092324ab577a5 +2024-05-07T10:55:45.209Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h032154d591fdb1359c2c41e7deeef601e37015bd580cf04b2695247a89a3e0ec +2024-05-07T10:55:45.005Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h35d3ddd90fe7cc1d88a783d83c064e6f82b9a9e7847703da280273d372dea274 +2024-05-07T10:55:44.572Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb9aed71559d8964cf52c00397839a6404a4ed78fc6c2cd7f44c5a3eca81e176d +2024-05-07T10:55:44.528Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hac6b02ab5608c734cf1b90323a86812fb9c6c2fa545f086f1dfab6b11eb85dd4 +2024-05-07T10:55:44.488Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hfc32dc48311f85213c6a8931b6bd5b885d4407f0a8c88978d29bec196fcc6829 +2024-05-07T10:55:44.040Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h0026460d89214cc08ceb02d168f8f0f5611c51e4910b34facc986dbcf71d52aa +2024-05-07T10:55:43.678Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hd91b7e6cb0dca38eb98ccb07ffbb6530de4816dc401f4bcb9e3be06f21f13754 +2024-05-07T10:55:43.641Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hc59da454b829a27bff78bd5fd947c2d2025b66598811ffa5e597ad2b3d1e4882 +2024-05-07T10:55:43.403Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h93fbdb49309735df265fdc2352362ec5fe1ea248b0974bb056182ff95f007e43 +2024-05-07T10:55:43.020Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hb5f3467ff0fff35babcd9a724335f3fdfcf0d8a4502a7b4ce7e62ad90755746e +2024-05-07T10:55:42.906Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h5aa99192a6fbda39a4b3898285587abcff1a464862650e241d8604a1c5a2145e +2024-05-07T10:55:42.781Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hf73344011936d023bf491a513ee6c5723dc186c7de42e62a6369f8704b984768 +2024-05-07T10:55:42.444Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h827b0c03ede436d629a6bc638675e0a6dcb33ad448e59255ddb98412a79d1ab8 +2024-05-07T10:55:41.759Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h1adc767771ff400f758bb9b4835222f97b36366d72dd357f3f9e1ee25db83da0 +2024-05-07T10:55:41.596Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h1536c94117665a51ffec90b4ae9935476c4caaaa9adc248e8c2c380b53c314e3 +2024-05-07T10:55:41.499Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h97c0cf32af1fe12dab409b2f4af782c7cf52260cd6e18800ee93db06be1f3bf2 
+2024-05-07T10:55:41.425Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h949919f118a7597dc09c3180e17faf2090f7ad539888144083928a60bda0c9bd +2024-05-07T10:55:41.005Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h677628ee471206064fb46369eccec810ac889901b1d260fa78d9586f4dee0d6d +2024-05-07T10:55:40.579Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h9f974f21a7c6d08acb583dbb4e29e496f5cfa36e71cc735107284cc8d3388abb +2024-05-07T10:55:40.531Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hd791d06f76f54c5e257e9b5202fbd5fc2e69ee963270af2651cb661bc6cbdaca +2024-05-07T10:55:40.064Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h8825b57db111e44cefaa62398e8246d6fbc3a8b12db4096abab8fe7c2caa3dee +2024-05-07T10:55:39.950Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hf5d97afe82691ba7523c3d340f3ad0ac6ac58777409b4fdd6d8c76c573a13bc9 +2024-05-07T10:55:39.931Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h343eb29563063b5b92b6a89734ef504f922dd9f00e52c2f394799d4cb5cef8f1 +2024-05-07T10:55:39.798Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h72125e86eb577efbe823d080de6d06d2c252508a12dd2cb8c5ad5937e41364d4 +2024-05-07T10:55:39.240Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3828c69adb37d6c09c2cdaead618580b1df0ba0127c885151aa566e1ecb0fcfa +2024-05-07T10:55:39.113Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h9f75dc9d7919f082e16d2cc37f4d614bb3f786f6e13a5dc91759361c539eba63 +2024-05-07T10:55:39.008Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha59968f61191e5c598b4787742568083da60be957cd27d1f52faecbd921ece99 +2024-05-07T10:55:38.511Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hc46a8b62524e37e99d4ef5352cf61f1f00275ea9bcffd51a2c11bb131e618bae +2024-05-07T10:55:38.478Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hba78989a5a47e1fb52dd9d3f378791b6e45b7835bd3a485442645cb3ee5fcab6 +2024-05-07T10:55:38.358Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb691625d3149ee8145dd83191b1f76602ee8f85a36c7d0d1fceada146071aac7 +2024-05-07T10:55:38.322Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hf09f47d1fec32e70a28cc28bcee6e84e4bbd619cee7ee11a4fc59c891adc50ce +2024-05-07T10:55:38.030Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/he544409b10aab21d2f626fb9840f9ffe70560e227519da2dc8f9897fa7fd5878 +2024-05-07T10:55:38.008Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/haff9dc5046934a24ef4f9675d0919efdf545942f3fe6754796f694a3326c1f7d +2024-05-07T10:55:37.598Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha3ba1eb9da3837495def7a8e41a14ce57c96e2edcc359f4d9ed4c855ef7576df +2024-05-07T10:55:37.591Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/ha40718f4cbb1e6c8786992415e8d598e63fcd279767b67804f187fec450ad5ce +2024-05-07T10:55:36.567Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h8e7d51cca82ac64459f5bdc309d66a12ac17cc2762b3605734d86dcd281e4c10 +2024-05-07T10:55:36.562Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-east-0/login/h64000ba858f5c470478bf5c136d8b8d27667f39d5b0aa7bff5059a25238dcdda +2024-05-07T10:55:36.453Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hdac2bfe1e2cea0825027e61c4112a35f86bd62da11a7bd0422f7d080e3728c46 +2024-05-07T10:55:36.438Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h74c51a18cbd5ca12c8abbcd8828b894829304fd4f704389891a43e835110c6b7 +2024-05-07T10:55:36.236Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hd551f303ebc9cb46ad6b887c5790533583de374fe9811b6b4423b7864ecdbcba +2024-05-07T10:55:36.071Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb327379389b59b12d75ab49623658924065ff6b4fa961a722d507217678f21f8 +2024-05-07T10:55:35.799Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hc027972c04e131a6c3b97966ddb7b503bd11e78eff84999f30d07f021826fa1a +2024-05-07T10:55:35.449Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hddafe23eac50ae1c1cba145d5814113d489dc7094c9986cb19671e3c9a65a79a +2024-05-07T10:55:35.248Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h5d380051fc6d650ae98c4bda4c2a32915ffb190cfa51a1120d8ef49182ff4ba6 +2024-05-07T10:55:34.853Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h6020eac7adec01be8a17ef19dcb9884c456f6a1e6c4c37595d741af3265e51b7 +2024-05-07T10:55:34.763Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h751ef0edfad672d35bcb31d73974c0b932df352163b5a0762d6f94b93d8315c1 +2024-05-07T10:55:34.620Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h0a1816413308c89a9603b37bf66541d1a6ecc1291d3754369ad6ca26174bdac5 +2024-05-07T10:55:34.591Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h3f6ee32eaa24b1d5fb51b13f30812e7d8352f4a05bdb742f22a5a7334e48f907 +2024-05-07T10:55:34.542Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h6f6224860fbbc0263866f7ebaa3432db197e107d8f446b93f4b335c5c986b895 +2024-05-07T10:55:34.538Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h9ba01bd652bbf57654d3176ff8e6bbcadeb2cd3201e5559dd58c1a8b7fbe023d +2024-05-07T10:55:34.511Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hebb24fe95fcc358cd225976235d9ed995c1b225e96fc4490ae47e347b0055964 +2024-05-07T10:55:34.489Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hbd58f678cbfc8292cd80fb76ccd6f90b5e5cc09286b3b4f84792345a6384c344 +2024-05-07T10:55:34.280Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hdc68675342c95720cb44405ecf9966abcfcfe17244a889a000e6059b1be32f6d +2024-05-07T10:55:34.064Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hcd458e079f16fb681d2894bbbe3d77be6e8ba0e0c446ae2703678464b1926a79 +2024-05-07T10:55:34.012Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h991c9ae2a9e549677832c0e5f9093666a6ab1c325614927ca936ca99e3e0e5e8 +2024-05-07T10:55:33.974Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h353733717aa0f00523ee4a5f6a23a4a68767a6006c4fe514fa35707a4d2c4985 +2024-05-07T10:55:33.769Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-eu-west-2/login/ha80ce7f579eebdd565cb18bb26115dee38760fa5b21f3f66739ce8b8af4f6fcc +2024-05-07T10:55:33.519Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/he0db8b020e3d11751791ddffb8ae9a3724d37dd109823ca98272bde53f63e703 +2024-05-07T10:55:33.313Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h0b32fda7c69e8fc864ad69fd54eb41fddd01f62de8d110190485c9a961f96870 +2024-05-07T10:55:33.231Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h624a00474c2b3e27f7abb25c5432c956924cd4b3a809695c527a01ed352845fe +2024-05-07T10:55:33.077Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h743d8efddbc42a4cc101986738984dbc46b3ccc59baaffd92889b290edf4d3f2 +2024-05-07T10:55:33.026Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h95ab367461188086703542a329dbe6b3b9ff54848d9b11ad8cb01cec96839e73 +2024-05-07T10:55:32.728Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h60ae095337c0d2b68a7ab8c4a0fea126e7fa3adaaada7597410c431248ae8b5d +2024-05-07T10:55:31.743Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/ha6d78f570db873d422aa293718025db89efa1790fb0360d0c440f848ed4cc679 +2024-05-07T10:55:31.612Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h5ba7fc3b3e068c0eafb59fc70c02a54605b1a9a44eade28e230144fdb47f9426 +2024-05-07T10:55:31.598Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hadea525341bdae9d19eabeb4c65e670349316f39cbb093c32db77dbc0e8c048c +2024-05-07T10:55:31.304Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/ha96e8dd1e1783c34953dcf0f59e338e16f6335f4b6a71dcbe952659f606a8631 +2024-05-07T10:55:30.814Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h648207e29a4bff8df7dde46f53645d9fa83959df0aa911ad636660b59c6c9827 +2024-05-07T10:55:30.809Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h65f0b6430e391d7fe4eec9c7e991dd321809bfd77b8fd3ea527262911fb54fbe +2024-05-07T10:55:30.655Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/he6edc29a5acac0cb63699e1696f1a5a55e6b97f7572452f9c0a2221c1969ef2a +2024-05-07T10:55:30.422Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hce3b00bb16ae979be576fcc2e17d736e0b5e471851f6c23cc5bae159344a3dda +2024-05-07T10:55:30.191Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h3fc79a5ed844199ecf61c035219c956a009bc5746c9b5bdc269cb01ea2fe1e9b +2024-05-07T10:55:30.015Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h792cf3a05c831c680c9aaead3983b0c055819f007385c928b2bf2a0a2e98adcc +2024-05-07T10:55:29.859Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h636924f15ec0db827fded733672e4da44714495084b3e0f258c654d109abda63 +2024-05-07T10:55:29.669Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hd5b8b6d14171b849234a0ffdd2f67c5ca10dfb66dc20506489057f3b958f67fc +2024-05-07T10:55:29.599Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hfff2c52cba87f309037f7a2626452b5ac4c39e6dd2026574ade649d765de5ecc +2024-05-07T10:55:29.493Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb063e675a9c589b96eb8691d053f91b4e76b33c3caf1adca965b8c2d589d83f2 
+2024-05-07T10:55:29.239Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5138e090db2549e39020db3593b3d00241447104b400001ef57eb344c3126bbc +2024-05-07T10:55:28.916Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h1edabbc92ede17aad7a16539821498095dd815a9e4a719225590100d84fc2b72 +2024-05-07T10:55:28.898Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h25ae45c24a9b8d68d67d2da38af253671b2e9be33fee5b4b7b15029babb49c0b +2024-05-07T10:55:28.637Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h206be1693170d0711599d2cb95f572a737316e4469c2279ca9bda280416b350f +2024-05-07T10:55:28.602Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h400e846cfd3273729666538c2b7992f54b75ce8ac463716fb3be24c5ccce180f +2024-05-07T10:55:28.472Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h197c00a8998054a1aea133b231112142e1405c75674f98477a16e25375aeb320 +2024-05-07T10:55:28.423Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h67e5775a90f9e28ddec330f92c93ac7ff276941739854fed1bc1491703775dd9 +2024-05-07T10:55:28.388Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h52d17f8dca9681e8f59becf55a3815a12ba87c73f435680256fd32827fb3e76d +2024-05-07T10:55:28.295Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h1fccae1fbaa6d5f55aaf01b5fee034399fb81e989c2a158b5a5698c823ae5e5a +2024-05-07T10:55:28.212Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h43823f14852c6cb06f1658dfd877671bdd8cb54f0c3db1cc153e900db64a6e69 +2024-05-07T10:55:28.036Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hac48d3ae4c4eaa1fede04bd2695ea3fc973bd9f04d37f8893b2ffcf91d9e3f13 +2024-05-07T10:55:27.509Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h8d7273e3cdf3b963ff3401f06a42035f6efae63d342f5ff617875dd3840bd6a2 +2024-05-07T10:55:27.433Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h6f85a4e0663d4a5299b3eb119af5a5f6dc3323c364e2ac1e4941ccae0b3d4017 +2024-05-07T10:55:27.424Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hbc3ff9556a6f6255a748a417cf8eb7f0bc985d3bce2b146eafcb3159b1e5ac90 +2024-05-07T10:55:27.208Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h3e8e36303f0f595cc36120d17d64b5afaf6cad7cce99e1b841a74259636eaf61 +2024-05-07T10:55:27.200Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h1fd5c132cb8362a44597d8ad090121c4a1090be1358422d489a060b67e17f2a2 +2024-05-07T10:55:26.789Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h4cf603ef30050e68818e7bccdc58e556850067e5d2e16ee03b853ca4493c02e2 +2024-05-07T10:55:26.772Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h0740323725f873ec1059579ef1a74b68ee7bf349f1bae86631b16316d2d58926 +2024-05-07T10:55:26.445Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h279caaadc7a093a809df10622252872edcfb625961adf1d86800fc1d808d00f5 +2024-05-07T10:55:26.337Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb1a265b10d1ef77e0c7be4f10a2f91b3ac36f23d525a4ba7eba9f9761a1ab073 +2024-05-07T10:55:25.966Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-east-0/login/hd2dc316aceb9c9d12a9d2049f572198ff8a1ac7e974079336aa9f3ea6287fca2 +2024-05-07T10:55:25.616Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb88a47ab67a113eb76d8216fafca1550d0f83854c14c81f2cada8576bc4d2913 +2024-05-07T10:55:25.603Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h7ef1fa04aa9e96e45fb223da187311291b99d56b77a3469713f69dfd4aa05fb0 +2024-05-07T10:55:25.531Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h7824a391fe116d334d469b3374ad0c9616a977a41b09c9433d3158f44a2ece78 +2024-05-07T10:55:25.437Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h30552eb2727d43d1f17392ce7130d3ff048cc47974502c557b5962bf5d4db562 +2024-05-07T10:55:25.344Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hcf933d21a571cc7e23b7c3c9031af5b9ca89a3e97cd005a1d7400a7a189cf75f +2024-05-07T10:55:25.236Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h619a0d9f32b2a3b25af344ba4ddd09f61f46aa4066eb207f158ed3081545e6ca +2024-05-07T10:55:25.061Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h76a4f503a5835443a2bfe0d9686a00a664f25a2bb39c9d89d24df146030868a6 +2024-05-07T10:55:25.009Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h9b812f74e40c5da7e3f431dbe35bbf880370ee29131f8d95c1f602d42cb152c6 +2024-05-07T10:55:24.767Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h963a03fd920573fa0aa01f527728340f54e0dd8444a7ff61b4268107b25bc29d +2024-05-07T10:55:24.367Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h485d067241ccd93ecb6072c6c948a5ba65c1347ccab3772c493416dd603e773d +2024-05-07T10:55:24.241Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5c45b9a4b55af37bbf62297e9376d04154634463a160314d47e148c52f19c088 +2024-05-07T10:55:24.180Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h3c43f977955b1648e82557071e176b39bc57613d80092d7f18eb8ac290071b47 +2024-05-07T10:55:24.104Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h07e9dc8d02a317b3122fdb582cc5c3dd9ffd28f4c620ed4089769932eccff0ef +2024-05-07T10:55:24.027Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h61c5f9c851f3e89952b75b709740922a662a056ac873864b14b1b958274249b4 +2024-05-07T10:55:23.498Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hc38d439a233a8fa15bb9335c5e928ba58f81cad7432135c2bb4d659dec0f2477 +2024-05-07T10:55:23.181Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h892efc48574ebaedd155e39033bce93805dfcb1a2ccc36cb96a14d024caa17d3 +2024-05-07T10:55:22.905Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb1ab5f4d9496dcc3194c7a96e26bb0970942e60624cb41344d2138b2176d7466 +2024-05-07T10:55:22.641Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h4a0a7a5d87cfd499cc06e6422f3f03079ec7f78556de5343b2e20452c6c2262b +2024-05-07T10:55:22.619Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h49fc8f0fe4551b6131331d4d80b87c2b8c1a21904829fcbcd2aea1b4b5ea5a6b +2024-05-07T10:55:22.440Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/h24443c219a4fa188f4e62b5a93bd9fa303714547e58d3ec32664686279ef4f90 +2024-05-07T10:55:22.317Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/heef3f4c773ab0e48d955a9b368122ccd208f5171cb545e07b87c37e58dfda041 +2024-05-07T10:55:21.961Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hc0e17d120d2b9613f8298fe9468897a55b31f94146ab5f97663459321318621a +2024-05-07T10:55:21.899Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h9d6880ddcbe615593d8ea4aa2e64e42965edafa61bb24db4bff4f3e8f98c0fc8 +2024-05-07T10:55:21.537Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hed08636b6a4f86a156018b05e968d6e03e37c97a7a4605f491e7e1aa958fa0db +2024-05-07T10:55:21.357Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/he7c3c7c3e703f500548c33c006006e0bdcb881507b959289dc64cbffcacc2e7a +2024-05-07T10:55:21.160Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h4a2019e6153be1914f30336957c10b84e04c9e8aeb80bcc47416e71365d12b15 +2024-05-07T10:55:21.107Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/hf28f613397152d9479f5249c3c1064cbb0decc23d7e92e6400038f1615a73e3e +2024-05-07T10:55:20.984Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hef58b9f0f8b7065abb7dd3a197fe12fcacc76dd09ace56d5791438c2377af707 +2024-05-07T10:55:20.733Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h023845b01fc052ce930432ec486864e26bbe3bb82b09711a81ef107d8da39860 +2024-05-07T10:55:20.576Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h9cb971eadf50b8cc5853a330e50ac86389219446665cf8ecf34551fc84c38046 +2024-05-07T10:55:19.958Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h4e7ec8b6fc0d609707dedd4e20758a1548c9cc5fa4847b03cd96c594711b982e +2024-05-07T10:55:19.905Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h20ed3f19cb3080ef5d2dc88b024e26f89f4c98bbcda5d4f06880c736a98418e9 +2024-05-07T10:55:19.721Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h6bd3a2c8d3f7e72f2c4b061769c6d0ee4d9ab7dde2b3dc8be20b250919822619 +2024-05-07T10:55:19.693Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h860a0e5391a56ee7df5925aa54584e11c798424607cf769dea30c2111f42119f +2024-05-07T10:55:19.642Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h8506853ec9f7b2e53903f9cf189c09b04050aa668199a1ebc977671582893ddf +2024-05-07T10:55:19.411Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hf73e0034b17f4b4085c552d42c94dae07927eab8c2b6796916ba63c51f1c786a +2024-05-07T10:55:19.410Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3b81c19666f2bfadf66c2e1022c7c88510188b708dd5fe812ecfef0dd1f6f83a +2024-05-07T10:55:19.054Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h418dd4350f6291c522c933f8826e1193a8616cf809eef2c87f303d93a00e8452 +2024-05-07T10:55:19.052Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h6d7762bf0f4c125fb1391379f0aeef1b64f17cf522d8fdb53b6c0d15252c648c +2024-05-07T10:55:18.928Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-1/login/hc7fb0f7e6160a1f71158759ac8cb329a65ea46f140de0f0433ebf2fabbe8de6a +2024-05-07T10:55:18.705Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/ha476954c59e76e56a220d954fc23e01b765e6eb3576b8abf9c111a3eec355b14 +2024-05-07T10:55:18.648Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h7da0df76b382ab441639e45becfb2237610b115fde1bdc3a82b6a359e8a5b638 +2024-05-07T10:55:18.460Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h47352bdfce33ae3947c958c5b97148e2f7679364f8b36722f9b3d77d21601c2a +2024-05-07T10:55:18.126Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hee8af105963ab690049d24288edb300402e0511523d761bb2ecd4f2e59187ef0 +2024-05-07T10:55:17.559Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h4701111682c9e1fee78823d4a2dae05c3ac32a55f0c922731382f068e8cfc57d +2024-05-07T10:55:17.327Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hfa8e343c88e887a17f9cd98a02d27ea96d82f269f980f9438f3a31969577a847 +2024-05-07T10:55:17.312Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hfcd477b88d604998b2dee2e2c95c044e2e1323ab648cfc796e1f94adb12307af +2024-05-07T10:55:17.009Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h0454bb9e5fc4e455bf32900d63358ff80bc3e742f0c6633a8a1826d8158f1103 +2024-05-07T10:55:16.904Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hd1ede44c616d245beac725f1cdfb473f408c5dbbf385fb6713b3f5847bbdd228 +2024-05-07T10:55:16.623Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/haed758ec750ebc27559e6acb1f429fd15e9730643e05c15d68328b3c5a8b592c +2024-05-07T10:55:16.500Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h0814860e624053a3db9060b4e406328f24efde6d077537c8c18e35dc642f147e +2024-05-07T10:55:16.298Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h1f2a79dea9e02ebef81e8fca0b45c5cbd82ae6c2a7262da38c534d8dd8a9043d +2024-05-07T10:55:16.239Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h3dd5e2831e1d621b1ad5dfa5c36a39e5e2d014059ac40e772b8012a8815eed31 +2024-05-07T10:55:15.736Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hc0c611eca965d8df1ce807519ddf7d3923aa269c8533e3f1d86ad02a2bde80d8 +2024-05-07T10:55:15.665Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h99f536dde21571186183810843a0c6287b62a71bc03b68a38660ca4255314133 +2024-05-07T10:55:15.580Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hf83c69737e4e245bcaf0792eba52c40c4cb3ed4cfa2bb5d6f1a0f2aafda8d138 +2024-05-07T10:55:15.200Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h265852fde7c96019269c3af60d17e129448ac514218822a9335bad0100d2cab8 +2024-05-07T10:55:15.164Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h54da453203efba4ba7bf7564122407a8abf5a168d62d27c7b56603e3481ea492 +2024-05-07T10:55:15.067Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h0076404b14c1f2443df4076b6600cfe6517ad6c5905d6f6425f66256d7303820 +2024-05-07T10:55:14.749Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-east-0/login/h8a5926c08de70e47ba94371a7978234df19eb1accf8fa28537ec59482676ac60 +2024-05-07T10:55:14.709Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/ha54ea131dee58bf8742bccaa32ad091e4a50f751be4046a4955e77d5f392584f +2024-05-07T10:55:14.414Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h8aa44203183016f5d4cb8e09ce0ac5b2d31b7ae403dd815ac24d1277487749c9 +2024-05-07T10:55:14.390Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h232c7e21b8ee188bfd08c05fef30cd8c36eaefd68c7cc51f2a66bc8f6b895da0 +2024-05-07T10:55:14.221Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8cb4dbbd967e9eecede1ac1ce8346f23bb38d16f1b9694dd4543c64e903babfd +2024-05-07T10:55:13.734Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h4eaff7c1511e2da8dc62e1f7887c69669f68e07759e520df1c850d99cd0c0190 +2024-05-07T10:55:13.559Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hb7aaffb95a429b4bf26064b4bef5b90ee0e6ea43ae694a7c44abcffe4217ba6e +2024-05-07T10:55:13.440Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h4166e24dea334313ddea044f5d8a78caf9839f482f4cf21bdc020286aff5c93a +2024-05-07T10:55:12.852Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h38f866293ee365e187f98cd24b61bdccf12971a36b5320fcd64052e136614a94 +2024-05-07T10:55:12.804Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hcd43a7a16a1ec07fe0d5646fe330d1d2203f3adf542383124bc35ababae7142b +2024-05-07T10:55:12.792Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h9d81ff4ac3d8841c7b6a1f4e73e710b63f0dbce43f39891203b8e5ba3c9bd133 +2024-05-07T10:55:12.788Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/ha6486876a1b98ad04e29ec037805be26b8ba26ee08930ae1ad38f5ce54fa5a95 +2024-05-07T10:55:12.103Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hafee5a0cffe399d57aa829ced9bb4486c6ed3ee8b172cfdc33ce0873bd637a24 +2024-05-07T10:55:11.875Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h595f9c748be2fc1cbfa0c560a149cc1e524ac77a48c46069981e9231618c7c78 +2024-05-07T10:55:11.835Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h70f0e7a57d1f609e8ac5b74379c3961dda91d8a3b77c0f4039bac2c4f45c0f60 +2024-05-07T10:55:11.326Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h9ee194b7c1c1f2942d5210933bf3a6f0722b0e3e32c376e75d8ca1650241e4bb +2024-05-07T10:55:10.970Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he40feb41ec978bd6b4c0fb0f5ad275233117474af006814d903189630fd7dc5f +2024-05-07T10:55:10.042Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h03fbfea0654abc1b511c8c1b9964cfed1c7a18a692f1064022139f193bee5692 +2024-05-07T10:55:10.041Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hafce45d8993e0262adc70960f01a1b387dab4f90bc96587b29c1a18d7cac25aa +2024-05-07T10:55:09.998Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h1321ee5c54556a9be3888b90f1a7d5a9395eefa1d69381841e91cb7e358d101d +2024-05-07T10:55:09.946Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h6adba8aca2da5ee4c51c9ad1b75a87551a4b88cfa8a23822993b220d9df80335 
+2024-05-07T10:55:09.906Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha1f656f926c09e12476aba88728acafb2547166c9be87f0cd173190d229eeadd +2024-05-07T10:55:09.906Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/he69d4a5bfda7ab2472dc71c85f87e4959c891d4fbbfed27cdafeee88a51c526c +2024-05-07T10:55:09.661Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h25c57f7902bded2962b9316f9f71d1354a0ef4d26d1732aea1c4ba01a0155895 +2024-05-07T10:55:09.654Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hc5be5fb3e1920ec32a9951290942f31febcdd3f2d425d5ee505b763aa32a68c2 +2024-05-07T10:55:08.829Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hc3601e64b86ad0e40b778a839f4ee7d61c5f76848143afb0f558590e5515fb30 +2024-05-07T10:55:08.815Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h225c32cb9ad5b8d09b89d6208324bd073c384687465ecb1573a700c3a0b30270 +2024-05-07T10:55:08.513Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/hcbde16b2088ac6b1bc11a7771c9d6104e06688efe5fb9ccfd3a3fe5079448ec7 +2024-05-07T10:55:08.320Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h358fb3c8b906199a7baaea71723aa6c85e6e2604203acc4be9dabd62101a7a14 +2024-05-07T10:55:08.136Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h03b8af00095be2ea15c44fbe1337a1d2a8d47cdc3fcc6d312d9c9bc5030c03e1 +2024-05-07T10:55:07.905Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h66a5d0192139f0218bc553684f01a8c05aba086ba716d18c1bc14637f3a611ea +2024-05-07T10:55:07.894Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h794e7cf96c5629611bb7657a063e37f3aa3b9f43dd10bc86476a7b7151e48495 +2024-05-07T10:55:07.786Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h766d4b7dfa4cbbcdd8b0d7ad59fa3054a4479ad905c51e7091b922569d415bc5 +2024-05-07T10:55:07.734Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/hf9f842e1123ca9fad06b409a7a04f2b4a15d3bfdfa25afd45fea72531cc684f7 +2024-05-07T10:55:07.454Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h0c71fde064fd467cea69639e6c70c26df3e474d64617452abb86de43752da694 +2024-05-07T10:55:07.395Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hb5da94522a45ba1786ed71b7a7eb3c86109d8788298e0ee8b4880f03c2a92c7b +2024-05-07T10:55:07.365Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/he2f59c6ae045543bb3f19c393675fbe524a1699f9f0213a4405da57ee52828d9 +2024-05-07T10:55:06.967Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hf9d44a3a3772d717ef42d32f926d53b88abccbf8a73ef5961a9915732e826cef +2024-05-07T10:55:06.892Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hfe121bba571735d872270bea39202caf5901a0cecf3e22f8ac8235f5f22d8234 +2024-05-07T10:55:06.833Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/h00a01ae54b9a1634dbad1ada95c80ccb9fe2ef0980eabcc5d00fa99a5c64ebf2 +2024-05-07T10:55:06.797Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/h2cc47b1ac171d9b3d581607a8979b5520061256448482af9535408c9c21a86eb +2024-05-07T10:55:06.435Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-us-central-0/login/h08eec8be241ed85ee80723519f212557f51c37f81b22cb96f611e6ff0ad7c1fc +2024-05-07T10:55:06.380Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h2836f426c44916d77fd92cfa5eb9f665ce04fe7734ab3b478dc626f213ea2142 +2024-05-07T10:55:06.362Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h3e73b7a2c28a5ec834007bd0355121145a4cb907d5143709dda12fb39261a9f4 +2024-05-07T10:55:06.136Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h70faf7d506e58f55a945ccb9ea7c18c522894ce11b5b42434ad67113e7644c33 +2024-05-07T10:55:06.064Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hd918d06c79aa6581b410c118b177efd03c0efba8fa4f6ef629000b6f44c8c24a +2024-05-07T10:55:05.982Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-do-newyork/login/h89a7ca2a0798da8f98aa52d8871f3453b4421ebea97ccd474e38dc7dd9d18b2b +2024-05-07T10:55:05.913Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h424f3ecc356945827092b32642d73a94c0a147860f648cd80b902b8073ef0e63 +2024-05-07T10:55:05.791Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hdf30c99e01e6dcf2337c4220d9eac35aa1739d9a894ff7d0fa517e923384998a +2024-05-07T10:55:05.166Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/hfc7dcfe6fd81a0108d4894db16cfaf8f147b0c6de6bf0b4f2705d8845d2bd6cc +2024-05-07T10:55:05.163Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hb18acdffb62d3f20f74b9296c2d10b04669006ddacd6a588a9234a87ed99e9ff +2024-05-07T10:55:04.891Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h9997617bf1392029b39f2d313943bcb67949d075780417b2d267cb481e3f3339 +2024-05-07T10:55:04.875Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hb42e68fedd3b6f559bce9039523751b39fe7cc21ca68d68616a06c35993dd979 +2024-05-07T10:55:04.033Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hc2c0e109f68a34f322810cd8eb26ba267ff13a608eaae5bf4d973ab10edced57 +2024-05-07T10:55:03.992Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hbfcd850245e1a1f47a562494473d1a6b4f3fcf3b8042edfcc692f412defdcd1d +2024-05-07T10:55:03.230Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h76334fff074ee5ba13551dd6132eafed07a46b456f9897a9be3e8221f976fec3 +2024-05-07T10:55:02.754Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h5ca9eaae65f927e0fad5e12a69b1ec8de9bc389161dac4c39cf18afa0d0ec0e6 +2024-05-07T10:55:02.675Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h6726f1a0ce27a88e2a94a08c4dd591d6d005028a33e22c7c34573e889325fc7d +2024-05-07T10:55:01.895Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h56f0cee5ff3af31c55c8b615b9b0a1b8d348840fa4cc9b5d0e1a83286e0dd93f +2024-05-07T10:55:01.891Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hacce56eee0a4b0e5268933ce31aeef343a2d9986858440bf3559283c4a40c84f +2024-05-07T10:55:01.690Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h8ea0e3d6dfbd666280a1ae95a1f02a79a1bbf412c94c29f445a71cb4b4ca577f +2024-05-07T10:55:01.179Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h803f71e9000a78e0831557e08aae58f70a8c5a08716d5ff9818dcdd1160a6467 +2024-05-07T10:55:01.028Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hed809df586776e2b1403eb4a76aad527b760b60cad8d724727881f8cb7005c54 +2024-05-07T10:55:00.935Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h6639cac5d2076f32ad5ca0b386e0fc2cc5a6db0d17db6c895e69d0258cc291f3 +2024-05-07T10:55:00.918Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h198c6cbda0e922b23809a67f5de2adddc85df9d1d0cd7774280492d8077502ea +2024-05-07T10:55:00.249Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h99bfab8bfb96f364c74f929c8ea74f560a30be6722ebd0479517714db9c72a34 +2024-05-07T10:55:00.090Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h6289c2a5063ee2bb6bbf520c26999f2f8cfa1abe0378cd2a016e7987d5553049 +2024-05-07T10:55:00.059Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hed3bc07e998e1c9cfea5c0c94fb0566a742b956bedb235242cfb961720eae0da +2024-05-07T10:54:59.758Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h68352c93c9333b4cd7f7b41eefc059130f6a49d9089b9df5c480f747f2c2a756 +2024-05-07T10:54:59.741Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hdb64a3f94ed6eb2eda228447e102b3a8c2d690cfdf8d19164742cda258141d8c +2024-05-07T10:54:59.038Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h8030afaff3ae8608bc287059443846ceb258a3e87bf0fbbfe008811b3f1be2c9 +2024-05-07T10:54:58.284Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/ha640d0ca7150440a81a6202545b0958156f96fa6d851fc8cce7613ab6dd986da +2024-05-07T10:54:58.060Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h7a1da44cb17e7dbef136e10fc0193275cac05b7b141d07be685075fc117b3aef +2024-05-07T10:54:57.995Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/he8b5743202c52657955a0c76aac092fe83091804ce8bde43137baddd1c81e5a1 +2024-05-07T10:54:57.986Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hc90a6f5b32a899d74850f4e848e89f401852d88e7a19b7ecf4cb10ac455b8f8c +2024-05-07T10:54:57.222Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h922be9b1b6ee37d910b62cbee70b2a5ddbbff9585e0e5558f48d145581e99890 +2024-05-07T10:54:57.125Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/he8ef7216d5c2945936215689ab4a2f124dc4220a1d5f0db95eb29ea2cd7b1fb3 +2024-05-07T10:54:56.618Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h284f18f14b3db25db61500e9e322d55540fbeb04dcfc310e7d7717cf169b11d2 +2024-05-07T10:54:56.460Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hf94e8da0dc4e938fe08921058cff87c1bae99a9542f214a41e776c27d1102342 +2024-05-07T10:54:56.410Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/ha0bdfaf8e94622740da97a4efc7d2cda2496328dd0cbdaa89fdc1f96080be6b5 +2024-05-07T10:54:55.607Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h787b624f68899b1e8e706a5441651da560861143ccb3be4b2fabad82176e325d +2024-05-07T10:54:55.560Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-eu-west-3/login/h6362f5e3fdb28f943a328d9993ef4ee05c3d8ebd014f3dcb8593ee2fbc723e29 +2024-05-07T10:54:54.630Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h1a72099ea9a8d45ec83ec90e37b79f391dab911a05c259031bdf9fe14d31afed +2024-05-07T10:54:54.046Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h62ae3d337327d3d56712c66fa6e56b1c146b9f887299bd92c6add3832f8e7e41 +2024-05-07T10:54:53.994Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha4b286f071b0864722810f484158e87a30170bb4bb345e21e5fc29f4331360c4 +2024-05-07T10:54:53.815Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hcdddbb5f8fdde6a975945473aa5e3e38b8f4a4a2f7adbd693a9d22ac2b298058 +2024-05-07T10:54:53.591Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h681662a96f4d1950600ca4399bf8f5fc68e7f68fb349aa17753d7e230cb4bf6f +2024-05-07T10:54:53.547Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h739f4ab3cc905269bb15b09a4fa20a73d7cd042cba66bb5db5e3410bf7bfeaf2 +2024-05-07T10:54:53.515Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/haf11d4aa4fc7a48853f774a040119fde1ab51150ef30586541ccdc1ee3fedea7 +2024-05-07T10:54:53.506Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hc52c317bcffcf3cb9b9a74115e5ead74514974baaf967b24a41184b212f802af +2024-05-07T10:54:53.346Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h30d450716fb64044b527b908c465c212f9417625c983764675645c84e32a3e34 +2024-05-07T10:54:53.038Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h06af9fc4fce6ffe97ff546a69681cd4a07f67b4408137980f79eca03dfb9094c +2024-05-07T10:54:52.850Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/ha04eaf61faed51bce819e6ab91d305cfc4a809db9d402830102b3f629b2b45b2 +2024-05-07T10:54:52.824Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h6d9e350963a9fcf3e7e14ac9974539854067ad2eb7d75ed1d31bd84f57460a6d +2024-05-07T10:54:52.608Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hd823d95dbab99e3058ced199cdd63716fd9fbd4b06486267ed8d4f62880840e4 +2024-05-07T10:54:52.580Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/hc279617c2eeaaa0da41a9afc64306c3dae2b5c97a5a7a3af2d43445a6f62b5df +2024-05-07T10:54:52.070Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h5c171c865e6cabb4bb2d6358f28de898e4c6ee03770ea799680adea0876c1c9f +2024-05-07T10:54:52.044Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h403a68c1fda4cf9f639d1ba0f670d8d68234c01aae2610a3657c5e631698938f +2024-05-07T10:54:52.016Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h28fae7beb7890eb2d154b9e985cdc6726705252e859a4d004c8dcdcba71b3ef2 +2024-05-07T10:54:52.001Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h64df43fd2ec524a0a3802d4cbad0105cb41c6a780da87c5a8395d02c2b254e98 +2024-05-07T10:54:51.966Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/haf1f65d9eec540338017e15622655da0a2bf67658b68009b8c898feeb9436bad +2024-05-07T10:54:51.935Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-eu-west-4/login/h9b67149d85bb1928b76ffb0e4fd0da11c101c7dcc05f15e9709622cf9a10c189 +2024-05-07T10:54:51.898Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hca5f88527985b9617ecf8929ea8015e50680732843a7ccbb3d7fe4b9d48558a8 +2024-05-07T10:54:51.590Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h13262228e7715c6b2ccc2698383f19eec5916e664c9405808d5f0f61d407555b +2024-05-07T10:54:51.447Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h418d333d1363ebf5f5db57fa136632706a2588dd556d5558ddca8775850e08f9 +2024-05-07T10:54:51.184Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h8b195d0416578afeca0f892d3322e04d3213b6da10558a4bdd28c2d1205eaa8e +2024-05-07T10:54:51.037Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h03ceca0e46aa973580545d0f9bfccab930b761a0a0435f45211f0a4199c11f8d +2024-05-07T10:54:50.970Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hb54415065520addd081f46edb3039c7744d753ccee9052e84ff4c9061f5540ed +2024-05-07T10:54:50.919Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-1/login/h6410eb7536aa5829b13f5ac64d4f348eebcbe0b71f851bfafd04fc4ebe6d3ea7 +2024-05-07T10:54:50.618Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h17866f0a37d66c7a1f72c528ba44a187b4dd85c7eea0e2ded781002e45d1d0a1 +2024-05-07T10:54:50.477Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h037906892fe927dc953458340714e842301d9d72c1617f53c22cb03742ae1554 +2024-05-07T10:54:50.448Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hc3a090ca7116d84a0f4b6ec4ac240cbc33ab5cb684a32209deed0cc4716f3bbc +2024-05-07T10:54:50.326Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/hc1b9880a56b7a29d64ea63184ceb4dacf8305eb8646359f28d4a258c1684f1f4 +2024-05-07T10:54:50.315Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h3cdaa3d3dfa52a7259e2aeaabf41f83d2ea4699fc748540c4bc121cf5c1f2749 +2024-05-07T10:54:49.684Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/ha7752c85de0fd91d07f3a4e6a49de4632d032035084033af915dd8a750492309 +2024-05-07T10:54:49.610Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hcfb669098e9c45b04e29510aa4619b87d03911616bd8a71a0c361557388396f1 +2024-05-07T10:54:49.531Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/he5718c0ff9e0d54b2ba39f4853a3cd50f2fe6a57dd4e4983e19dbe0bea1ad3a7 +2024-05-07T10:54:49.437Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-3/login/hebf5949c38c925405c87d133a3d20f3e2249d93aec44a794ef7bff63ccac11b9 +2024-05-07T10:54:48.899Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/habf62e02e388a3fbf2140fbbaaafb0aee88eb1e6a3e975dd391d0621f5a0284c +2024-05-07T10:54:48.896Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h210b94f72d71546ba67e995e412d922d25b701309a9436194ddff7c4468b4b46 +2024-05-07T10:54:48.881Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h302654dbefe8bf9968b637f648e89b35c5d9c58cafbba92bc99de0c387ee56f3 +2024-05-07T10:54:48.804Z [INFO] expiration: revoked lease: 
lease_id=auth/kubernetes/dev-eu-west-2/login/hf3362d92c2653f64ddeac62a9248b16ded8a993039bfc941e5b07f2f49ed3267 +2024-05-07T10:54:48.700Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/h728713e53b96f4715ddd7ad284ba7b5f7c1816150c068c36a3fccf62bea679a8 +2024-05-07T10:54:48.083Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/h996e7c5d29f95ddd0bb373d792a0f49afd4e1e38bbfcbc5ef536ed3b84a46a48 +2024-05-07T10:54:48.004Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/pop-dev-lin-atlanta/login/h018b53d13739920df5a7a7d89a189766cccdd432dfe28c062589782682c84bdc +2024-05-07T10:54:47.464Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/hd467acdb1ce52a1307ea10192fea47fd2f035f370f30e9f73cfa890aeb25e61c +2024-05-07T10:54:47.286Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-east-0/login/hba807c7c107b9c0f2bdd6a8149ef7ec23a8123a5146427957c94bf465b189838 +2024-05-07T10:54:47.206Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-us-central-0/login/h7444b52b817e91665aa0a2e4691b699a180b6ab6b3bd346b91abe0ae584da86d +2024-05-07T10:54:47.150Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-2/login/h297c6b7d0f3ede17ef7992b2ee77a43d076f036050268cb66684e2ec3df96146 +2024-05-07T10:54:47.129Z [INFO] expiration: revoked lease: lease_id=auth/kubernetes/dev-eu-west-4/login/hd5edb60c3c54d838ee97124a6a7510fe170ddc05724063c6ecc9885c2169fe90 \ No newline at end of file From d4910d9b9df22d29a63e85fed4d9020faf4bf2fd Mon Sep 17 00:00:00 2001 From: Yoshitaka Fujii <76274657+ystkfujii@users.noreply.github.com> Date: Tue, 14 May 2024 02:52:39 +0900 Subject: [PATCH 23/47] docs: fix typo ? (#12898) Co-authored-by: J Stickler --- docs/sources/get-started/components.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/get-started/components.md b/docs/sources/get-started/components.md index 2ea5349d75105..641c950ad7485 100644 --- a/docs/sources/get-started/components.md +++ b/docs/sources/get-started/components.md @@ -31,7 +31,7 @@ This page describes the responsibilities of each of these components. ## Distributor The **distributor** service is responsible for handling incoming push requests from -clients. It's the first stop in the write path for log data. Once the +clients. It's the first step in the write path for log data. Once the distributor receives a set of streams in an HTTP request, each stream is validated for correctness and to ensure that it is within the configured tenant (or global) limits. Each valid stream is then sent to `n` [ingesters](#ingester) in parallel, where `n` is the [replication factor](#replication-factor) for data. From c6ec06304adfea3a37ebf8f0ecc3c633f41ba215 Mon Sep 17 00:00:00 2001 From: NACAMURA Mitsuhiro Date: Tue, 14 May 2024 03:58:53 +0900 Subject: [PATCH 24/47] docs: update NixOS wiki link (#12940) Co-authored-by: J Stickler --- nix/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nix/README.md b/nix/README.md index f8867899ad755..a1562096526cd 100644 --- a/nix/README.md +++ b/nix/README.md @@ -27,4 +27,4 @@ To build the repo (including running tests), from the root of the repo you can r Nix is supported on Linux, MacOS, and Windows (WSL2). Check [here](https://nixos.org/download.html#download-nix) for installation instructions for your specific platform. -You will also need to enable the Flakes feature to use Nix with this repo. 
See this [wiki](https://nixos.wiki/wiki/Flakes) for instructions on enabling Flakes. +You will also need to enable the Flakes feature to use Nix with this repo. See this [wiki](https://wiki.nixos.org/wiki/Flakes) for instructions on enabling Flakes. From 35e10d4004278a4726f6b73da2dfab455093e905 Mon Sep 17 00:00:00 2001 From: Oleg10 Date: Mon, 13 May 2024 21:43:18 +0200 Subject: [PATCH 25/47] docs: Update _index.md (#12814) Co-authored-by: J Stickler --- docs/sources/send-data/_index.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/sources/send-data/_index.md b/docs/sources/send-data/_index.md index f3106edf2352c..4fd5f7681ea5b 100644 --- a/docs/sources/send-data/_index.md +++ b/docs/sources/send-data/_index.md @@ -60,6 +60,7 @@ These third-party clients also enable sending logs to Loki: - [push-to-loki.py](https://github.com/sleleko/devops-kb/blob/master/python/push-to-loki.py) (Python 3) - [python-logging-loki](https://pypi.org/project/python-logging-loki/) (Python 3) - [nextlog](https://pypi.org/project/nextlog/) (Python 3) +- [Rails Loki Exporter](https://github.com/planninghow/rails-loki-exporter) (Rails) - [Serilog-Sinks-Loki](https://github.com/JosephWoodward/Serilog-Sinks-Loki) (C#) - [Vector Loki Sink](https://vector.dev/docs/reference/configuration/sinks/loki/) - [winston-loki](https://github.com/JaniAnttonen/winston-loki) (JS) From f66172eed17f9418ab22615537c7b65b09de96e5 Mon Sep 17 00:00:00 2001 From: Karsten Jeschkies Date: Mon, 13 May 2024 22:24:25 +0200 Subject: [PATCH 26/47] feat: improve performance of `first_over_time` and `last_over_time` queries by sharding them (#11605) Signed-off-by: Callum Styan Co-authored-by: Callum Styan --- pkg/logql/downstream.go | 129 ++++++++++++ pkg/logql/downstream_test.go | 6 +- pkg/logql/engine.go | 10 +- pkg/logql/evaluator.go | 22 ++ pkg/logql/explain.go | 13 ++ pkg/logql/first_last_over_time.go | 270 +++++++++++++++++++++++++ pkg/logql/optimize.go | 2 +- pkg/logql/quantile_over_time_sketch.go | 5 - pkg/logql/shardmapper.go | 53 +++++ pkg/logql/step_evaluator.go | 17 ++ pkg/logql/syntax/ast.go | 13 +- pkg/logql/test_utils.go | 3 +- 12 files changed, 527 insertions(+), 16 deletions(-) create mode 100644 pkg/logql/first_last_over_time.go diff --git a/pkg/logql/downstream.go b/pkg/logql/downstream.go index 2e3dbfd014a1d..f28eaddde3f1a 100644 --- a/pkg/logql/downstream.go +++ b/pkg/logql/downstream.go @@ -301,6 +301,62 @@ func (e *QuantileSketchMergeExpr) Walk(f syntax.WalkFn) { } } +type MergeFirstOverTimeExpr struct { + syntax.SampleExpr + downstreams []DownstreamSampleExpr +} + +func (e MergeFirstOverTimeExpr) String() string { + var sb strings.Builder + for i, d := range e.downstreams { + if i >= defaultMaxDepth { + break + } + + if i > 0 { + sb.WriteString(" ++ ") + } + + sb.WriteString(d.String()) + } + return fmt.Sprintf("MergeFirstOverTime<%s>", sb.String()) +} + +func (e *MergeFirstOverTimeExpr) Walk(f syntax.WalkFn) { + f(e) + for _, d := range e.downstreams { + d.Walk(f) + } +} + +type MergeLastOverTimeExpr struct { + syntax.SampleExpr + downstreams []DownstreamSampleExpr +} + +func (e MergeLastOverTimeExpr) String() string { + var sb strings.Builder + for i, d := range e.downstreams { + if i >= defaultMaxDepth { + break + } + + if i > 0 { + sb.WriteString(" ++ ") + } + + sb.WriteString(d.String()) + } + return fmt.Sprintf("MergeLastOverTime<%s>", sb.String()) +} + +func (e *MergeLastOverTimeExpr) Walk(f syntax.WalkFn) { + f(e) + for _, d := range e.downstreams { + d.Walk(f) + } +} + type Downstreamable interface { 
Downstreamer(context.Context) Downstreamer } @@ -471,7 +527,80 @@ func (ev *DownstreamEvaluator) NewStepEvaluator( } inner := NewQuantileSketchMatrixStepEvaluator(matrix, params) return NewQuantileSketchVectorStepEvaluator(inner, *e.quantile), nil + case *MergeFirstOverTimeExpr: + queries := make([]DownstreamQuery, len(e.downstreams)) + + for i, d := range e.downstreams { + qry := DownstreamQuery{ + Params: ParamsWithExpressionOverride{ + Params: params, + ExpressionOverride: d.SampleExpr, + }, + } + if shard := d.shard; shard != nil { + qry.Params = ParamsWithShardsOverride{ + Params: qry.Params, + ShardsOverride: Shards{shard.Shard}.Encode(), + } + } + queries[i] = qry + } + + acc := NewBufferedAccumulator(len(queries)) + results, err := ev.Downstream(ctx, queries, acc) + if err != nil { + return nil, err + } + + xs := make([]promql.Matrix, 0, len(queries)) + for _, res := range results { + + switch data := res.Data.(type) { + case promql.Matrix: + xs = append(xs, data) + default: + return nil, fmt.Errorf("unexpected type (%s) uncoercible to StepEvaluator", data.Type()) + } + } + + return NewMergeFirstOverTimeStepEvaluator(params, xs), nil + case *MergeLastOverTimeExpr: + queries := make([]DownstreamQuery, len(e.downstreams)) + + for i, d := range e.downstreams { + qry := DownstreamQuery{ + Params: ParamsWithExpressionOverride{ + Params: params, + ExpressionOverride: d.SampleExpr, + }, + } + if shard := d.shard; shard != nil { + qry.Params = ParamsWithShardsOverride{ + Params: qry.Params, + ShardsOverride: Shards{shard.Shard}.Encode(), + } + } + queries[i] = qry + } + + acc := NewBufferedAccumulator(len(queries)) + results, err := ev.Downstream(ctx, queries, acc) + if err != nil { + return nil, err + } + + xs := make([]promql.Matrix, 0, len(queries)) + for _, res := range results { + + switch data := res.Data.(type) { + case promql.Matrix: + xs = append(xs, data) + default: + return nil, fmt.Errorf("unexpected type (%s) uncoercible to StepEvaluator", data.Type()) + } + } + return NewMergeLastOverTimeStepEvaluator(params, xs), nil default: return ev.defaultEvaluator.NewStepEvaluator(ctx, nextEvFactory, e, params) } diff --git a/pkg/logql/downstream_test.go b/pkg/logql/downstream_test.go index c33f97ed74a50..0777822dbbb65 100644 --- a/pkg/logql/downstream_test.go +++ b/pkg/logql/downstream_test.go @@ -65,6 +65,10 @@ func TestMappingEquivalence(t *testing.T) { `, false, }, + {`first_over_time({a=~".+"} | logfmt | unwrap value [1s])`, false}, + {`first_over_time({a=~".+"} | logfmt | unwrap value [1s]) by (a)`, false}, + {`last_over_time({a=~".+"} | logfmt | unwrap value [1s])`, false}, + {`last_over_time({a=~".+"} | logfmt | unwrap value [1s]) by (a)`, false}, // topk prefers already-seen values in tiebreakers. Since the test data generates // the same log lines for each series & the resulting promql.Vectors aren't deterministically // sorted by labels, we don't expect this to pass. 
@@ -141,7 +145,7 @@ func TestMappingEquivalenceSketches(t *testing.T) { query string realtiveError float64 }{ - {`quantile_over_time(0.70, {a=~".+"} | logfmt | unwrap value [1s]) by (a)`, 0.03}, + {`quantile_over_time(0.70, {a=~".+"} | logfmt | unwrap value [1s]) by (a)`, 0.05}, {`quantile_over_time(0.99, {a=~".+"} | logfmt | unwrap value [1s]) by (a)`, 0.02}, } { q := NewMockQuerier( diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index a25a726bb3021..fcbfcb450e683 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -363,7 +363,7 @@ func (q *query) evalSample(ctx context.Context, expr syntax.SampleExpr) (promql_ } defer util.LogErrorWithContext(ctx, "closing SampleExpr", stepEvaluator.Close) - next, ts, r := stepEvaluator.Next() + next, _, r := stepEvaluator.Next() if stepEvaluator.Error() != nil { return nil, stepEvaluator.Error() } @@ -373,7 +373,7 @@ func (q *query) evalSample(ctx context.Context, expr syntax.SampleExpr) (promql_ case SampleVector: maxSeriesCapture := func(id string) int { return q.limits.MaxQuerySeries(ctx, id) } maxSeries := validation.SmallestPositiveIntPerTenant(tenantIDs, maxSeriesCapture) - return q.JoinSampleVector(next, ts, vec, stepEvaluator, maxSeries) + return q.JoinSampleVector(next, vec, stepEvaluator, maxSeries) case ProbabilisticQuantileVector: return MergeQuantileSketchVector(next, vec, stepEvaluator, q.params) default: @@ -383,7 +383,7 @@ func (q *query) evalSample(ctx context.Context, expr syntax.SampleExpr) (promql_ return nil, nil } -func (q *query) JoinSampleVector(next bool, ts int64, r StepResult, stepEvaluator StepEvaluator, maxSeries int) (promql_parser.Value, error) { +func (q *query) JoinSampleVector(next bool, r StepResult, stepEvaluator StepEvaluator, maxSeries int) (promql_parser.Value, error) { seriesIndex := map[uint64]*promql.Series{} @@ -431,7 +431,7 @@ func (q *query) JoinSampleVector(next bool, ts int64, r StepResult, stepEvaluato seriesIndex[hash] = series } series.Floats = append(series.Floats, promql.FPoint{ - T: ts, + T: p.T, F: p.F, }) } @@ -439,7 +439,7 @@ func (q *query) JoinSampleVector(next bool, ts int64, r StepResult, stepEvaluato if len(seriesIndex) > maxSeries { return nil, logqlmodel.NewSeriesLimitError(maxSeries) } - next, ts, r = stepEvaluator.Next() + next, _, r = stepEvaluator.Next() if stepEvaluator.Error() != nil { return nil, stepEvaluator.Error() } diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go index 7c1f450217313..ff887ff9b7529 100644 --- a/pkg/logql/evaluator.go +++ b/pkg/logql/evaluator.go @@ -595,6 +595,28 @@ func newRangeAggEvaluator( return &QuantileSketchStepEvaluator{ iter: iter, }, nil + case syntax.OpRangeTypeFirstWithTimestamp: + iter := newFirstWithTimestampIterator( + it, + expr.Left.Interval.Nanoseconds(), + q.Step().Nanoseconds(), + q.Start().UnixNano(), q.End().UnixNano(), o.Nanoseconds(), + ) + + return &RangeVectorEvaluator{ + iter: iter, + }, nil + case syntax.OpRangeTypeLastWithTimestamp: + iter := newLastWithTimestampIterator( + it, + expr.Left.Interval.Nanoseconds(), + q.Step().Nanoseconds(), + q.Start().UnixNano(), q.End().UnixNano(), o.Nanoseconds(), + ) + + return &RangeVectorEvaluator{ + iter: iter, + }, nil default: iter, err := newRangeVectorIterator( it, expr, diff --git a/pkg/logql/explain.go b/pkg/logql/explain.go index 4890d150f0a61..22240f5804b35 100644 --- a/pkg/logql/explain.go +++ b/pkg/logql/explain.go @@ -57,3 +57,16 @@ func (e *BinOpStepEvaluator) Explain(parent Node) { func (i *VectorIterator) Explain(parent Node) { parent.Childf("%f 
vectorIterator", i.val) } + +func (e *QuantileSketchVectorStepEvaluator) Explain(parent Node) { + b := parent.Child("QuantileSketchVector") + e.inner.Explain(b) +} + +func (e *mergeOverTimeStepEvaluator) Explain(parent Node) { + parent.Child("MergeFirstOverTime") +} + +func (EmptyEvaluator[SampleVector]) Explain(parent Node) { + parent.Child("Empty") +} diff --git a/pkg/logql/first_last_over_time.go b/pkg/logql/first_last_over_time.go new file mode 100644 index 0000000000000..e24133d13bfec --- /dev/null +++ b/pkg/logql/first_last_over_time.go @@ -0,0 +1,270 @@ +package logql + +import ( + "math" + "time" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql" + + "github.com/grafana/loki/v3/pkg/iter" +) + +// newFirstWithTimestampIterator returns an iterator the returns the first value +// of a windowed aggregation. +func newFirstWithTimestampIterator( + it iter.PeekingSampleIterator, + selRange, step, start, end, offset int64) RangeVectorIterator { + inner := &batchRangeVectorIterator{ + iter: it, + step: step, + end: end, + selRange: selRange, + metrics: map[string]labels.Labels{}, + window: map[string]*promql.Series{}, + agg: nil, + current: start - step, // first loop iteration will set it to start + offset: offset, + } + return &firstWithTimestampBatchRangeVectorIterator{ + batchRangeVectorIterator: inner, + } +} + +type firstWithTimestampBatchRangeVectorIterator struct { + *batchRangeVectorIterator + at []promql.Sample +} + +// At aggregates the underlying window by picking the first sample with its +// timestamp. +func (r *firstWithTimestampBatchRangeVectorIterator) At() (int64, StepResult) { + if r.at == nil { + r.at = make([]promql.Sample, 0, len(r.window)) + } + r.at = r.at[:0] + // convert ts from nano to milli seconds as the iterator work with nanoseconds + ts := r.current/1e+6 + r.offset/1e+6 + for _, series := range r.window { + s := r.agg(series.Floats) + r.at = append(r.at, promql.Sample{ + F: s.F, + T: s.T / int64(time.Millisecond), + Metric: series.Metric, + }) + } + return ts, SampleVector(r.at) +} + +// agg returns the first sample with its timestamp. The input is assumed to be +// in order. +func (r *firstWithTimestampBatchRangeVectorIterator) agg(samples []promql.FPoint) promql.FPoint { + if len(samples) == 0 { + return promql.FPoint{F: math.NaN(), T: 0} + } + return samples[0] +} + +func newLastWithTimestampIterator( + it iter.PeekingSampleIterator, + selRange, step, start, end, offset int64) RangeVectorIterator { + inner := &batchRangeVectorIterator{ + iter: it, + step: step, + end: end, + selRange: selRange, + metrics: map[string]labels.Labels{}, + window: map[string]*promql.Series{}, + agg: nil, + current: start - step, // first loop iteration will set it to start + offset: offset, + } + return &lastWithTimestampBatchRangeVectorIterator{ + batchRangeVectorIterator: inner, + } +} + +// lastWithTimestampBatchRangeVectorIterator returns an iterator that returns the +// last point in a windowed aggregation. +type lastWithTimestampBatchRangeVectorIterator struct { + *batchRangeVectorIterator + at []promql.Sample +} + +// At aggregates the underlying window by picking the last sample with its +// timestamp. 
+func (r *lastWithTimestampBatchRangeVectorIterator) At() (int64, StepResult) { + if r.at == nil { + r.at = make([]promql.Sample, 0, len(r.window)) + } + r.at = r.at[:0] + // convert ts from nano to milli seconds as the iterator work with nanoseconds + ts := r.current/1e+6 + r.offset/1e+6 + for _, series := range r.window { + s := r.agg(series.Floats) + r.at = append(r.at, promql.Sample{ + F: s.F, + T: s.T / int64(time.Millisecond), + Metric: series.Metric, + }) + } + return ts, SampleVector(r.at) +} + +// agg returns the last sample with its timestamp. The input is assumed to be +// in order. +func (r *lastWithTimestampBatchRangeVectorIterator) agg(samples []promql.FPoint) promql.FPoint { + if len(samples) == 0 { + return promql.FPoint{F: math.NaN(), T: 0} + } + return samples[len(samples)-1] +} + +type mergeOverTimeStepEvaluator struct { + start, end, ts time.Time + step time.Duration + matrices []promql.Matrix + merge func(promql.Vector, int, int, promql.Series) promql.Vector +} + +// Next returns the first or last element within one step of each matrix. +func (e *mergeOverTimeStepEvaluator) Next() (bool, int64, StepResult) { + + var ( + vec promql.Vector + ) + + e.ts = e.ts.Add(e.step) + if e.ts.After(e.end) { + return false, 0, nil + } + ts := e.ts.UnixNano() / int64(time.Millisecond) + + // Merge other results + for i, m := range e.matrices { + for j, series := range m { + + if len(series.Floats) == 0 || !e.inRange(series.Floats[0].T, ts) { + continue + } + + vec = e.merge(vec, j, len(m), series) + e.pop(i, j) + } + } + + // Align vector timestamps with step + for i := range vec { + vec[i].T = ts + } + + if len(vec) == 0 { + return e.hasNext(), ts, SampleVector(vec) + } + + return true, ts, SampleVector(vec) +} + +// pop drops the float of the s'th series in the r'th matrix. +func (e *mergeOverTimeStepEvaluator) pop(r, s int) { + if len(e.matrices[r][s].Floats) <= 1 { + e.matrices[r][s].Floats = nil + return + } + e.matrices[r][s].Floats = e.matrices[r][s].Floats[1:] +} + +// inRange returns true if t is in step range of ts. +func (e *mergeOverTimeStepEvaluator) inRange(t, ts int64) bool { + return (ts-e.step.Milliseconds()) <= t && t < ts +} + +func (e *mergeOverTimeStepEvaluator) hasNext() bool { + for _, m := range e.matrices { + for _, s := range m { + if len(s.Floats) != 0 { + return true + } + } + } + + return false +} + +func (*mergeOverTimeStepEvaluator) Close() error { return nil } + +func (*mergeOverTimeStepEvaluator) Error() error { return nil } + +func NewMergeFirstOverTimeStepEvaluator(params Params, m []promql.Matrix) StepEvaluator { + if len(m) == 0 { + return EmptyEvaluator[SampleVector]{} + } + + var ( + start = params.Start() + end = params.End() + step = params.Step() + ) + + return &mergeOverTimeStepEvaluator{ + start: start, + end: end, + ts: start.Add(-step), // will be corrected on first Next() call + step: step, + matrices: m, + merge: mergeFirstOverTime, + } +} + +// mergeFirstOverTime selects the first sample by timestamp of each series. 
+func mergeFirstOverTime(vec promql.Vector, pos int, nSeries int, series promql.Series) promql.Vector { + if len(vec) < nSeries { + return append(vec, promql.Sample{ + Metric: series.Metric, + T: series.Floats[0].T, + F: series.Floats[0].F, + }) + } else if vec[pos].T > series.Floats[0].T { + vec[pos].F = series.Floats[0].F + vec[pos].T = series.Floats[0].T + } + + return vec +} + +func NewMergeLastOverTimeStepEvaluator(params Params, m []promql.Matrix) StepEvaluator { + if len(m) == 0 { + return EmptyEvaluator[SampleVector]{} + } + + var ( + start = params.Start() + end = params.End() + step = params.Step() + ) + + return &mergeOverTimeStepEvaluator{ + start: start, + end: end, + ts: start.Add(-step), // will be corrected on first Next() call + step: step, + matrices: m, + merge: mergeLastOverTime, + } +} + +// mergeLastOverTime selects the last sample by timestamp of each series. +func mergeLastOverTime(vec promql.Vector, pos int, nSeries int, series promql.Series) promql.Vector { + if len(vec) < nSeries { + return append(vec, promql.Sample{ + Metric: series.Metric, + T: series.Floats[0].T, + F: series.Floats[0].F, + }) + } else if vec[pos].T < series.Floats[0].T { + vec[pos].F = series.Floats[0].F + vec[pos].T = series.Floats[0].T + } + + return vec +} diff --git a/pkg/logql/optimize.go b/pkg/logql/optimize.go index ef930be799664..53f1bc94ba397 100644 --- a/pkg/logql/optimize.go +++ b/pkg/logql/optimize.go @@ -8,7 +8,7 @@ func optimizeSampleExpr(expr syntax.SampleExpr) (syntax.SampleExpr, error) { // we skip sharding AST for now, it's not easy to clone them since they are not part of the language. expr.Walk(func(e syntax.Expr) { switch e.(type) { - case *ConcatSampleExpr, DownstreamSampleExpr, *QuantileSketchEvalExpr, *QuantileSketchMergeExpr: + case *ConcatSampleExpr, DownstreamSampleExpr, *QuantileSketchEvalExpr, *QuantileSketchMergeExpr, *MergeFirstOverTimeExpr, *MergeLastOverTimeExpr: skip = true return } diff --git a/pkg/logql/quantile_over_time_sketch.go b/pkg/logql/quantile_over_time_sketch.go index 42288830c2ddc..81cd3369e3498 100644 --- a/pkg/logql/quantile_over_time_sketch.go +++ b/pkg/logql/quantile_over_time_sketch.go @@ -448,8 +448,3 @@ func (e *QuantileSketchVectorStepEvaluator) Next() (bool, int64, StepResult) { func (*QuantileSketchVectorStepEvaluator) Close() error { return nil } func (*QuantileSketchVectorStepEvaluator) Error() error { return nil } - -func (e *QuantileSketchVectorStepEvaluator) Explain(parent Node) { - b := parent.Child("QuantileSketchVector") - e.inner.Explain(b) -} diff --git a/pkg/logql/shardmapper.go b/pkg/logql/shardmapper.go index d965676c278f0..67b35b809df10 100644 --- a/pkg/logql/shardmapper.go +++ b/pkg/logql/shardmapper.go @@ -471,6 +471,59 @@ func (m ShardMapper) mapRangeAggregationExpr(expr *syntax.RangeAggregationExpr, quantile: expr.Params, }, bytesPerShard, nil + case syntax.OpRangeTypeFirst: + potentialConflict := syntax.ReducesLabels(expr) + if !potentialConflict && (expr.Grouping == nil || expr.Grouping.Noop()) { + return m.mapSampleExpr(expr, r) + } + + shards, bytesPerShard, err := m.shards.Shards(expr) + if err != nil { + return nil, 0, err + } + if len(shards) == 0 { + return noOp(expr, m.shards.Resolver()) + } + + downstreams := make([]DownstreamSampleExpr, 0, len(shards)) + // This is the magic. 
We send a custom operation + expr.Operation = syntax.OpRangeTypeFirstWithTimestamp + for i := len(shards) - 1; i >= 0; i-- { + downstreams = append(downstreams, DownstreamSampleExpr{ + shard: &shards[i], + SampleExpr: expr, + }) + } + + return &MergeFirstOverTimeExpr{ + downstreams: downstreams, + }, bytesPerShard, nil + case syntax.OpRangeTypeLast: + potentialConflict := syntax.ReducesLabels(expr) + if !potentialConflict && (expr.Grouping == nil || expr.Grouping.Noop()) { + return m.mapSampleExpr(expr, r) + } + + shards, bytesPerShard, err := m.shards.Shards(expr) + if err != nil { + return nil, 0, err + } + if len(shards) == 0 { + return noOp(expr, m.shards.Resolver()) + } + + downstreams := make([]DownstreamSampleExpr, 0, len(shards)) + expr.Operation = syntax.OpRangeTypeLastWithTimestamp + for i := len(shards) - 1; i >= 0; i-- { + downstreams = append(downstreams, DownstreamSampleExpr{ + shard: &shards[i], + SampleExpr: expr, + }) + } + + return &MergeLastOverTimeExpr{ + downstreams: downstreams, + }, bytesPerShard, nil default: // don't shard if there's not an appropriate optimization return noOp(expr, m.shards.Resolver()) diff --git a/pkg/logql/step_evaluator.go b/pkg/logql/step_evaluator.go index 955f9e2b97f86..23b313f18da27 100644 --- a/pkg/logql/step_evaluator.go +++ b/pkg/logql/step_evaluator.go @@ -32,3 +32,20 @@ type StepEvaluator interface { // Explain returns a print of the step evaluation tree Explain(Node) } + +type EmptyEvaluator[R StepResult] struct { + value R +} + +var _ StepEvaluator = EmptyEvaluator[SampleVector]{} + +// Close implements StepEvaluator. +func (EmptyEvaluator[_]) Close() error { return nil } + +// Error implements StepEvaluator. +func (EmptyEvaluator[_]) Error() error { return nil } + +// Next implements StepEvaluator. +func (e EmptyEvaluator[_]) Next() (ok bool, ts int64, r StepResult) { + return false, 0, e.value +} diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go index 0cf43ae71c7d5..6e3f18b7cc8e6 100644 --- a/pkg/logql/syntax/ast.go +++ b/pkg/logql/syntax/ast.go @@ -1242,7 +1242,9 @@ const ( // internal expressions not represented in LogQL. These are used to // evaluate expressions differently resulting in intermediate formats // that are not consumable by LogQL clients but are used for sharding. 
- OpRangeTypeQuantileSketch = "__quantile_sketch_over_time__" + OpRangeTypeQuantileSketch = "__quantile_sketch_over_time__" + OpRangeTypeFirstWithTimestamp = "__first_over_time_ts__" + OpRangeTypeLastWithTimestamp = "__last_over_time_ts__" ) func IsComparisonOperator(op string) bool { @@ -1346,7 +1348,9 @@ func (e *RangeAggregationExpr) MatcherGroups() ([]MatcherRange, error) { func (e RangeAggregationExpr) validate() error { if e.Grouping != nil { switch e.Operation { - case OpRangeTypeAvg, OpRangeTypeStddev, OpRangeTypeStdvar, OpRangeTypeQuantile, OpRangeTypeQuantileSketch, OpRangeTypeMax, OpRangeTypeMin, OpRangeTypeFirst, OpRangeTypeLast: + case OpRangeTypeAvg, OpRangeTypeStddev, OpRangeTypeStdvar, OpRangeTypeQuantile, + OpRangeTypeQuantileSketch, OpRangeTypeMax, OpRangeTypeMin, OpRangeTypeFirst, + OpRangeTypeLast, OpRangeTypeFirstWithTimestamp, OpRangeTypeLastWithTimestamp: default: return fmt.Errorf("grouping not allowed for %s aggregation", e.Operation) } @@ -1355,7 +1359,8 @@ func (e RangeAggregationExpr) validate() error { switch e.Operation { case OpRangeTypeAvg, OpRangeTypeSum, OpRangeTypeMax, OpRangeTypeMin, OpRangeTypeStddev, OpRangeTypeStdvar, OpRangeTypeQuantile, OpRangeTypeRate, OpRangeTypeRateCounter, - OpRangeTypeAbsent, OpRangeTypeFirst, OpRangeTypeLast, OpRangeTypeQuantileSketch: + OpRangeTypeAbsent, OpRangeTypeFirst, OpRangeTypeLast, OpRangeTypeQuantileSketch, + OpRangeTypeFirstWithTimestamp, OpRangeTypeLastWithTimestamp: return nil default: return fmt.Errorf("invalid aggregation %s with unwrap", e.Operation) @@ -2216,6 +2221,8 @@ var shardableOps = map[string]bool{ // range vector ops OpRangeTypeAvg: true, OpRangeTypeCount: true, + OpRangeTypeFirst: true, + OpRangeTypeLast: true, OpRangeTypeRate: true, OpRangeTypeBytes: true, OpRangeTypeBytesRate: true, diff --git a/pkg/logql/test_utils.go b/pkg/logql/test_utils.go index d141f39bf0778..7c9d3233f4299 100644 --- a/pkg/logql/test_utils.go +++ b/pkg/logql/test_utils.go @@ -277,8 +277,9 @@ func randomStreams(nStreams, nEntries, nShards int, labelNames []string, valueFi if valueField { line = fmt.Sprintf("%s value=%f", line, r.Float64()*100.0) } + nanos := r.Int63n(time.Second.Nanoseconds()) stream.Entries = append(stream.Entries, logproto.Entry{ - Timestamp: time.Unix(0, int64(j*int(time.Second))), + Timestamp: time.Unix(0, int64(j*int(time.Second))+nanos), Line: line, }) } From 7cc9a9386a8f89dbec6a25435180ed4625ae6490 Mon Sep 17 00:00:00 2001 From: Sandeep Sukhani Date: Tue, 14 May 2024 13:09:05 +0530 Subject: [PATCH 27/47] fix: fix parsing of default per tenant otlp config (#12836) --- integration/cluster/cluster.go | 4 ++++ integration/loki_micro_services_test.go | 2 +- pkg/validation/limits.go | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/integration/cluster/cluster.go b/integration/cluster/cluster.go index 694119ca4fe21..2ee68e15cc178 100644 --- a/integration/cluster/cluster.go +++ b/integration/cluster/cluster.go @@ -68,6 +68,9 @@ limits_config: attributes_config: - action: index_label attributes: ["service.name"] + log_attributes: + - action: drop + attributes: [email] storage_config: named_stores: @@ -430,6 +433,7 @@ func (c *Component) run() error { return err } + config.LimitsConfig.SetGlobalOTLPConfig(config.Distributor.OTLPConfig) var err error c.loki, err = loki.New(config.Config) if err != nil { diff --git a/integration/loki_micro_services_test.go b/integration/loki_micro_services_test.go index 33f28296bc062..a048d09324948 100644 --- a/integration/loki_micro_services_test.go +++ 
b/integration/loki_micro_services_test.go @@ -739,7 +739,7 @@ func TestOTLPLogsIngestQuery(t *testing.T) { t.Run("ingest-logs", func(t *testing.T) { // ingest some log lines - require.NoError(t, cliDistributor.PushOTLPLogLine("lineA", now.Add(-45*time.Minute), map[string]any{"trace_id": 1, "user_id": "2"})) + require.NoError(t, cliDistributor.PushOTLPLogLine("lineA", now.Add(-45*time.Minute), map[string]any{"trace_id": 1, "user_id": "2", "email": "foo@bar.com"})) require.NoError(t, cliDistributor.PushOTLPLogLine("lineB", now.Add(-45*time.Minute), nil)) require.NoError(t, cliDistributor.PushOTLPLogLine("lineC", now, map[string]any{"order.ids": []any{5, 6}})) diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index a4280a2ee9e36..ca33d1f4bf425 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -402,7 +402,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { // SetGlobalOTLPConfig set GlobalOTLPConfig which is used while unmarshaling per-tenant otlp config to use the default list of resource attributes picked as index labels. func (l *Limits) SetGlobalOTLPConfig(cfg push.GlobalOTLPConfig) { l.GlobalOTLPConfig = cfg - l.OTLPConfig = push.DefaultOTLPConfig(cfg) + l.OTLPConfig.ApplyGlobalOTLPConfig(cfg) } // UnmarshalYAML implements the yaml.Unmarshaler interface. From 00bdd2f5b703991b280317ceff0fcf2eed1847d9 Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Tue, 14 May 2024 11:05:21 +0200 Subject: [PATCH 28/47] fix(blooms): Disable metas cache on bloom gateway (#12959) The bloom gateway does not fetch any metas any more since the index gateway resolves them. Renamed isModuleEnabled() to isTarget(), because the function name is confusing. Signed-off-by: Christian Haudum --- pkg/loki/loki.go | 10 +++++----- pkg/loki/modules.go | 40 ++++++++++++++++++++-------------------- pkg/loki/validation.go | 2 +- 3 files changed, 26 insertions(+), 26 deletions(-) diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index ff9c00e0ed598..b682c4bfaa65c 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -294,7 +294,7 @@ func (c *Config) Validate() error { return nil } -func (c *Config) isModuleEnabled(m string) bool { +func (c *Config) isTarget(m string) bool { return util.StringsContain(c.Target, m) } @@ -726,24 +726,24 @@ func (t *Loki) setupModuleManager() error { } // Add IngesterQuerier as a dependency for store when target is either querier, ruler, read, or backend. 
- if t.Cfg.isModuleEnabled(Querier) || t.Cfg.isModuleEnabled(Ruler) || t.Cfg.isModuleEnabled(Read) || t.Cfg.isModuleEnabled(Backend) { + if t.Cfg.isTarget(Querier) || t.Cfg.isTarget(Ruler) || t.Cfg.isTarget(Read) || t.Cfg.isTarget(Backend) { deps[Store] = append(deps[Store], IngesterQuerier) } // If the query scheduler and querier are running together, make sure the scheduler goes // first to initialize the ring that will also be used by the querier - if (t.Cfg.isModuleEnabled(Querier) && t.Cfg.isModuleEnabled(QueryScheduler)) || t.Cfg.isModuleEnabled(All) { + if (t.Cfg.isTarget(Querier) && t.Cfg.isTarget(QueryScheduler)) || t.Cfg.isTarget(All) { deps[Querier] = append(deps[Querier], QueryScheduler) } // If the query scheduler and query frontend are running together, make sure the scheduler goes // first to initialize the ring that will also be used by the query frontend - if (t.Cfg.isModuleEnabled(QueryFrontend) && t.Cfg.isModuleEnabled(QueryScheduler)) || t.Cfg.isModuleEnabled(All) { + if (t.Cfg.isTarget(QueryFrontend) && t.Cfg.isTarget(QueryScheduler)) || t.Cfg.isTarget(All) { deps[QueryFrontend] = append(deps[QueryFrontend], QueryScheduler) } // Initialise query tags interceptors on targets running ingester - if t.Cfg.isModuleEnabled(Ingester) || t.Cfg.isModuleEnabled(Write) || t.Cfg.isModuleEnabled(All) { + if t.Cfg.isTarget(Ingester) || t.Cfg.isTarget(Write) || t.Cfg.isTarget(All) { deps[Server] = append(deps[Server], IngesterGRPCInterceptors) } diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 458d7c9e3f5c8..a509fbe263e0f 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -295,7 +295,7 @@ func (t *Loki) initOverrides() (_ services.Service, err error) { } func (t *Loki) initOverridesExporter() (services.Service, error) { - if t.Cfg.isModuleEnabled(OverridesExporter) && t.TenantLimits == nil || t.Overrides == nil { + if t.Cfg.isTarget(OverridesExporter) && t.TenantLimits == nil || t.Overrides == nil { // This target isn't enabled by default ("all") and requires per-tenant limits to run. 
return nil, errors.New("overrides-exporter has been enabled, but no runtime configuration file was configured") } @@ -347,7 +347,7 @@ func (t *Loki) initDistributor() (services.Service, error) { // Register the distributor to receive Push requests over GRPC // EXCEPT when running with `-target=all` or `-target=` contains `ingester` - if !t.Cfg.isModuleEnabled(All) && !t.Cfg.isModuleEnabled(Write) && !t.Cfg.isModuleEnabled(Ingester) { + if !t.Cfg.isTarget(All) && !t.Cfg.isTarget(Write) && !t.Cfg.isTarget(Ingester) { logproto.RegisterPusherServer(t.Server.GRPC, t.distributor) } @@ -407,13 +407,13 @@ func (t *Loki) initQuerier() (services.Service, error) { } querierWorkerServiceConfig := querier.WorkerServiceConfig{ - AllEnabled: t.Cfg.isModuleEnabled(All), - ReadEnabled: t.Cfg.isModuleEnabled(Read), + AllEnabled: t.Cfg.isTarget(All), + ReadEnabled: t.Cfg.isTarget(Read), GrpcListenAddress: t.Cfg.Server.GRPCListenAddress, GrpcListenPort: t.Cfg.Server.GRPCListenPort, QuerierWorkerConfig: &t.Cfg.Worker, - QueryFrontendEnabled: t.Cfg.isModuleEnabled(QueryFrontend), - QuerySchedulerEnabled: t.Cfg.isModuleEnabled(QueryScheduler), + QueryFrontendEnabled: t.Cfg.isTarget(QueryFrontend), + QuerySchedulerEnabled: t.Cfg.isTarget(QueryScheduler), SchedulerRing: scheduler.SafeReadRing(t.Cfg.QueryScheduler, t.querySchedulerRingManager), } @@ -727,7 +727,7 @@ func (t *Loki) initBloomStore() (services.Service, error) { bsCfg := t.Cfg.StorageConfig.BloomShipperConfig var metasCache cache.Cache - if cache.IsCacheConfigured(bsCfg.MetasCache) { + if t.Cfg.isTarget(IndexGateway) && cache.IsCacheConfigured(bsCfg.MetasCache) { metasCache, err = cache.New(bsCfg.MetasCache, reg, logger, stats.BloomMetasCache, constants.Loki) // always enable LRU cache @@ -781,7 +781,7 @@ func (t *Loki) updateConfigForShipperStore() { } switch true { - case t.Cfg.isModuleEnabled(Ingester), t.Cfg.isModuleEnabled(Write): + case t.Cfg.isTarget(Ingester), t.Cfg.isTarget(Write): // Use embedded cache for caching index in memory, this also significantly helps performance. 
t.Cfg.StorageConfig.IndexQueriesCacheConfig = cache.Config{ EmbeddedCache: cache.EmbeddedCacheConfig{ @@ -803,7 +803,7 @@ func (t *Loki) updateConfigForShipperStore() { t.Cfg.StorageConfig.TSDBShipperConfig.Mode = indexshipper.ModeWriteOnly t.Cfg.StorageConfig.TSDBShipperConfig.IngesterDBRetainPeriod = shipperQuerierIndexUpdateDelay(t.Cfg.StorageConfig.IndexCacheValidity, t.Cfg.StorageConfig.TSDBShipperConfig.ResyncInterval) - case t.Cfg.isModuleEnabled(Querier), t.Cfg.isModuleEnabled(Ruler), t.Cfg.isModuleEnabled(Read), t.Cfg.isModuleEnabled(Backend), t.isModuleActive(IndexGateway), t.Cfg.isModuleEnabled(BloomCompactor): + case t.Cfg.isTarget(Querier), t.Cfg.isTarget(Ruler), t.Cfg.isTarget(Read), t.Cfg.isTarget(Backend), t.isModuleActive(IndexGateway), t.Cfg.isTarget(BloomCompactor): // We do not want query to do any updates to index t.Cfg.StorageConfig.BoltDBShipperConfig.Mode = indexshipper.ModeReadOnly t.Cfg.StorageConfig.TSDBShipperConfig.Mode = indexshipper.ModeReadOnly @@ -844,7 +844,7 @@ func (t *Loki) setupAsyncStore() error { ) switch true { - case t.Cfg.isModuleEnabled(Querier), t.Cfg.isModuleEnabled(Ruler), t.Cfg.isModuleEnabled(Read): + case t.Cfg.isTarget(Querier), t.Cfg.isTarget(Ruler), t.Cfg.isTarget(Read): // Do not use the AsyncStore if the querier is configured with QueryStoreOnly set to true if t.Cfg.Querier.QueryStoreOnly { break @@ -855,16 +855,16 @@ func (t *Loki) setupAsyncStore() error { asyncStore = true // The legacy Read target includes the index gateway, so disable the index-gateway client in that configuration. - if t.Cfg.LegacyReadTarget && t.Cfg.isModuleEnabled(Read) { + if t.Cfg.LegacyReadTarget && t.Cfg.isTarget(Read) { t.Cfg.StorageConfig.BoltDBShipperConfig.IndexGatewayClientConfig.Disabled = true t.Cfg.StorageConfig.TSDBShipperConfig.IndexGatewayClientConfig.Disabled = true } // Backend target includes the index gateway - case t.Cfg.isModuleEnabled(IndexGateway), t.Cfg.isModuleEnabled(Backend): + case t.Cfg.isTarget(IndexGateway), t.Cfg.isTarget(Backend): // we want to use the actual storage when running the index-gateway, so we remove the Addr from the config t.Cfg.StorageConfig.BoltDBShipperConfig.IndexGatewayClientConfig.Disabled = true t.Cfg.StorageConfig.TSDBShipperConfig.IndexGatewayClientConfig.Disabled = true - case t.Cfg.isModuleEnabled(All): + case t.Cfg.isTarget(All): // We want ingester to also query the store when using boltdb-shipper but only when running with target All. // We do not want to use AsyncStore otherwise it would start spiraling around doing queries over and over again to the ingesters and store. // ToDo: See if we can avoid doing this when not running loki in clustered mode. @@ -985,8 +985,8 @@ func (t *Loki) supportIndexDeleteRequest() bool { // compactorAddress returns the configured address of the compactor. // It prefers grpc address over http. 
If the address is grpc then the bool would be true otherwise false func (t *Loki) compactorAddress() (string, bool, error) { - legacyReadMode := t.Cfg.LegacyReadTarget && t.Cfg.isModuleEnabled(Read) - if t.Cfg.isModuleEnabled(All) || legacyReadMode || t.Cfg.isModuleEnabled(Backend) { + legacyReadMode := t.Cfg.LegacyReadTarget && t.Cfg.isTarget(Read) + if t.Cfg.isTarget(All) || legacyReadMode || t.Cfg.isTarget(Backend) { // In single binary or read modes, this module depends on Server return net.JoinHostPort(t.Cfg.Server.GRPCListenAddress, strconv.Itoa(t.Cfg.Server.GRPCListenPort)), true, nil } @@ -1151,8 +1151,8 @@ func (t *Loki) initRulerStorage() (_ services.Service, err error) { // unfortunately there is no way to generate a "default" config and compare default against actual // to determine if it's unconfigured. the following check, however, correctly tests this. // Single binary integration tests will break if this ever drifts - legacyReadMode := t.Cfg.LegacyReadTarget && t.Cfg.isModuleEnabled(Read) - if (t.Cfg.isModuleEnabled(All) || legacyReadMode || t.Cfg.isModuleEnabled(Backend)) && t.Cfg.Ruler.StoreConfig.IsDefaults() { + legacyReadMode := t.Cfg.LegacyReadTarget && t.Cfg.isTarget(Read) + if (t.Cfg.isTarget(All) || legacyReadMode || t.Cfg.isTarget(Backend)) && t.Cfg.Ruler.StoreConfig.IsDefaults() { level.Info(util_log.Logger).Log("msg", "Ruler storage is not configured; ruler will not be started.") return } @@ -1476,7 +1476,7 @@ func (t *Loki) initIndexGatewayRing() (_ services.Service, err error) { t.Cfg.StorageConfig.TSDBShipperConfig.Mode = indexshipper.ModeReadOnly managerMode := lokiring.ClientMode - if t.Cfg.isModuleEnabled(IndexGateway) || legacyReadMode || t.Cfg.isModuleEnabled(Backend) { + if t.Cfg.isTarget(IndexGateway) || legacyReadMode || t.Cfg.isTarget(Backend) { managerMode = lokiring.ServerMode } rm, err := lokiring.NewRingManager(indexGatewayRingKey, managerMode, t.Cfg.IndexGateway.Ring, t.Cfg.IndexGateway.Ring.ReplicationFactor, indexgateway.NumTokens, util_log.Logger, prometheus.DefaultRegisterer) @@ -1497,7 +1497,7 @@ func (t *Loki) initIndexGatewayRing() (_ services.Service, err error) { func (t *Loki) initIndexGatewayInterceptors() (services.Service, error) { // Only expose per-tenant metric if index gateway runs as standalone service - if t.Cfg.isModuleEnabled(IndexGateway) { + if t.Cfg.isTarget(IndexGateway) { interceptors := indexgateway.NewServerInterceptors(prometheus.DefaultRegisterer) t.Cfg.Server.GRPCMiddleware = append(t.Cfg.Server.GRPCMiddleware, interceptors.PerTenantRequestCount) } @@ -1572,7 +1572,7 @@ func (t *Loki) initQuerySchedulerRing() (_ services.Service, err error) { t.Cfg.QueryScheduler.SchedulerRing.ListenPort = t.Cfg.Server.GRPCListenPort managerMode := lokiring.ClientMode - if t.Cfg.isModuleEnabled(QueryScheduler) || t.Cfg.isModuleEnabled(Backend) || t.Cfg.isModuleEnabled(All) || (t.Cfg.LegacyReadTarget && t.Cfg.isModuleEnabled(Read)) { + if t.Cfg.isTarget(QueryScheduler) || t.Cfg.isTarget(Backend) || t.Cfg.isTarget(All) || (t.Cfg.LegacyReadTarget && t.Cfg.isTarget(Read)) { managerMode = lokiring.ServerMode } rm, err := lokiring.NewRingManager(schedulerRingKey, managerMode, t.Cfg.QueryScheduler.SchedulerRing, scheduler.ReplicationFactor, scheduler.NumTokens, util_log.Logger, prometheus.DefaultRegisterer) diff --git a/pkg/loki/validation.go b/pkg/loki/validation.go index 1acb8d20afe17..6e7e19cc44805 100644 --- a/pkg/loki/validation.go +++ b/pkg/loki/validation.go @@ -14,7 +14,7 @@ func validateBackendAndLegacyReadMode(c *Config) []error 
{ var errs []error // Honor the legacy scalable deployment topology if c.LegacyReadTarget { - if c.isModuleEnabled(Backend) { + if c.isTarget(Backend) { errs = append(errs, fmt.Errorf("CONFIG ERROR: invalid target, cannot run backend target with legacy read mode")) } } From a1b1eeb09583f04a36ebdb96f716f3f285b90adf Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Tue, 14 May 2024 13:28:48 +0200 Subject: [PATCH 29/47] feat(bloom): Skip attempts to filter chunks for which blooms have not been built (#12961) Bloom filters are built from `today - MaxTableOffset` to `today - MinTableOffset`, this means that blooms are not available for the most recent period, between `now` and at least `today - MinTableOffset`. To avoid resolving chunks and filtering out no-matches, we can skip chunks for this period completely and return them as-is without filtering. Signed-off-by: Christian Haudum --- pkg/bloomgateway/querier.go | 39 ++++++++++++++++++++++++++++++-- pkg/bloomgateway/querier_test.go | 9 ++++---- pkg/loki/modules.go | 5 +++- 3 files changed, 46 insertions(+), 7 deletions(-) diff --git a/pkg/bloomgateway/querier.go b/pkg/bloomgateway/querier.go index 0e523817dc70a..c92d6fad30f73 100644 --- a/pkg/bloomgateway/querier.go +++ b/pkg/bloomgateway/querier.go @@ -2,6 +2,7 @@ package bloomgateway import ( "context" + "time" "github.com/go-kit/log" "github.com/go-kit/log/level" @@ -13,6 +14,7 @@ import ( "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/querier/plan" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/v3/pkg/util/constants" "github.com/grafana/loki/v3/pkg/util/spanlogger" @@ -61,19 +63,26 @@ func newQuerierMetrics(registerer prometheus.Registerer, namespace, subsystem st } } +type QuerierConfig struct { + // MinTableOffset is derived from the compactor's MinTableOffset + MinTableOffset int +} + // BloomQuerier is a store-level abstraction on top of Client // It is used by the index gateway to filter ChunkRefs based on given line fiter expression. 
type BloomQuerier struct { c Client + cfg QuerierConfig logger log.Logger metrics *querierMetrics limits Limits blockResolver BlockResolver } -func NewQuerier(c Client, limits Limits, resolver BlockResolver, r prometheus.Registerer, logger log.Logger) *BloomQuerier { +func NewQuerier(c Client, cfg QuerierConfig, limits Limits, resolver BlockResolver, r prometheus.Registerer, logger log.Logger) *BloomQuerier { return &BloomQuerier{ c: c, + cfg: cfg, logger: logger, metrics: newQuerierMetrics(r, constants.Loki, querierMetricsSubsystem), limits: limits, @@ -101,6 +110,33 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from preFilterChunks := len(chunkRefs) preFilterSeries := len(grouped) + // Do not attempt to filter chunks for which there are no blooms + if bq.cfg.MinTableOffset > 0 { + minAge := truncateDay(model.Now()).Add(-1 * config.ObjectStorageIndexRequiredPeriod * time.Duration(bq.cfg.MinTableOffset-1)) + if through.After(minAge) { + level.Debug(logger).Log( + "msg", "skip too recent chunks", + "tenant", tenant, + "from", from.Time(), + "through", through.Time(), + "responses", 0, + "preFilterChunks", preFilterChunks, + "postFilterChunks", preFilterChunks, + "filteredChunks", 0, + "preFilterSeries", preFilterSeries, + "postFilterSeries", preFilterSeries, + "filteredSeries", 0, + ) + + bq.metrics.chunksTotal.Add(float64(preFilterChunks)) + bq.metrics.chunksFiltered.Add(0) + bq.metrics.seriesTotal.Add(float64(preFilterSeries)) + bq.metrics.seriesFiltered.Add(0) + + return chunkRefs, nil + } + } + responses := make([][]*logproto.GroupedChunkRefs, 0, 2) // We can perform requests sequentially, because most of the time the request // only covers a single day, and if not, it's at most two days. @@ -153,7 +189,6 @@ func (bq *BloomQuerier) FilterChunkRefs(ctx context.Context, tenant string, from "preFilterSeries", preFilterSeries, "postFilterSeries", postFilterSeries, "filteredSeries", preFilterSeries-postFilterSeries, - "operation", "bloomquerier.FilterChunkRefs", ) bq.metrics.chunksTotal.Add(float64(preFilterChunks)) diff --git a/pkg/bloomgateway/querier_test.go b/pkg/bloomgateway/querier_test.go index 516f1cd403bb3..d4b24447ae124 100644 --- a/pkg/bloomgateway/querier_test.go +++ b/pkg/bloomgateway/querier_test.go @@ -61,12 +61,13 @@ var _ BlockResolver = &mockBlockResolver{} func TestBloomQuerier(t *testing.T) { logger := log.NewNopLogger() limits := newLimits() + cfg := QuerierConfig{} resolver := &mockBlockResolver{} tenant := "fake" t.Run("client not called when filters are empty", func(t *testing.T) { c := &noopClient{} - bq := NewQuerier(c, limits, resolver, nil, logger) + bq := NewQuerier(c, cfg, limits, resolver, nil, logger) ctx := context.Background() through := model.Now() @@ -86,7 +87,7 @@ func TestBloomQuerier(t *testing.T) { t.Run("client not called when chunkRefs are empty", func(t *testing.T) { c := &noopClient{} - bq := NewQuerier(c, limits, resolver, nil, logger) + bq := NewQuerier(c, cfg, limits, resolver, nil, logger) ctx := context.Background() through := model.Now() @@ -102,7 +103,7 @@ func TestBloomQuerier(t *testing.T) { t.Run("querier propagates error from client", func(t *testing.T) { c := &noopClient{err: errors.New("something went wrong")} - bq := NewQuerier(c, limits, resolver, nil, logger) + bq := NewQuerier(c, cfg, limits, resolver, nil, logger) ctx := context.Background() through := model.Now() @@ -121,7 +122,7 @@ func TestBloomQuerier(t *testing.T) { t.Run("client called once for each day of the interval", func(t *testing.T) { 
c := &noopClient{} - bq := NewQuerier(c, limits, resolver, nil, logger) + bq := NewQuerier(c, cfg, limits, resolver, nil, logger) ctx := context.Background() from := mktime("2024-04-16 22:00") diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index a509fbe263e0f..3561f89a23187 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -1445,7 +1445,10 @@ func (t *Loki) initIndexGateway() (services.Service, error) { return nil, err } resolver := bloomgateway.NewBlockResolver(t.BloomStore, logger) - bloomQuerier = bloomgateway.NewQuerier(bloomGatewayClient, t.Overrides, resolver, prometheus.DefaultRegisterer, logger) + querierCfg := bloomgateway.QuerierConfig{ + MinTableOffset: t.Cfg.BloomCompactor.MinTableOffset, + } + bloomQuerier = bloomgateway.NewQuerier(bloomGatewayClient, querierCfg, t.Overrides, resolver, prometheus.DefaultRegisterer, logger) } gateway, err := indexgateway.NewIndexGateway(t.Cfg.IndexGateway, t.Overrides, logger, prometheus.DefaultRegisterer, t.Store, indexClients, bloomQuerier) From bc53b337218425af5b5ce69dcef56e27afec6647 Mon Sep 17 00:00:00 2001 From: benclive Date: Wed, 15 May 2024 10:45:37 +0100 Subject: [PATCH 30/47] feat: Add metrics for number of patterns detected & evicted (#12918) --- pkg/pattern/drain/drain.go | 17 +++++++--- pkg/pattern/drain/drain_test.go | 56 ++++++++++++++++----------------- pkg/pattern/drain/metrics.go | 8 +++++ pkg/pattern/ingester.go | 2 +- pkg/pattern/ingester_querier.go | 2 +- pkg/pattern/ingester_test.go | 2 +- pkg/pattern/instance.go | 6 ++-- pkg/pattern/metrics.go | 16 +++++++++- pkg/pattern/stream.go | 6 +++- pkg/pattern/stream_test.go | 4 +-- 10 files changed, 77 insertions(+), 42 deletions(-) create mode 100644 pkg/pattern/drain/metrics.go diff --git a/pkg/pattern/drain/drain.go b/pkg/pattern/drain/drain.go index ade8fca366b8a..31932832f7010 100644 --- a/pkg/pattern/drain/drain.go +++ b/pkg/pattern/drain/drain.go @@ -44,11 +44,11 @@ type Config struct { ParamString string } -func createLogClusterCache(maxSize int) *LogClusterCache { +func createLogClusterCache(maxSize int, onEvict func(int, *LogCluster)) *LogClusterCache { if maxSize == 0 { maxSize = math.MaxInt } - cache, _ := simplelru.NewLRU[int, *LogCluster](maxSize, nil) + cache, _ := simplelru.NewLRU[int, *LogCluster](maxSize, onEvict) return &LogClusterCache{ cache: cache, } @@ -146,16 +146,21 @@ func DefaultConfig() *Config { } } -func New(config *Config) *Drain { +func New(config *Config, metrics *Metrics) *Drain { if config.LogClusterDepth < 3 { panic("depth argument must be at least 3") } config.maxNodeDepth = config.LogClusterDepth - 2 + var evictFn func(int, *LogCluster) + if metrics != nil { + evictFn = func(int, *LogCluster) { metrics.PatternsEvictedTotal.Inc() } + } d := &Drain{ config: config, rootNode: createNode(), - idToCluster: createLogClusterCache(config.MaxClusters), + idToCluster: createLogClusterCache(config.MaxClusters, evictFn), + metrics: metrics, } return d } @@ -165,6 +170,7 @@ type Drain struct { rootNode *Node idToCluster *LogClusterCache clustersCounter int + metrics *Metrics } func (d *Drain) Clusters() []*LogCluster { @@ -195,6 +201,9 @@ func (d *Drain) train(tokens []string, stringer func([]string) string, ts int64) matchCluster.append(model.TimeFromUnixNano(ts)) d.idToCluster.Set(clusterID, matchCluster) d.addSeqToPrefixTree(d.rootNode, matchCluster) + if d.metrics != nil { + d.metrics.PatternsDetectedTotal.Inc() + } } else { newTemplateTokens := d.createTemplate(tokens, matchCluster.Tokens) matchCluster.Tokens = newTemplateTokens 
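For context on the eviction counter wired in above: PatternsEvictedTotal is driven entirely by the LRU's eviction callback, so no bookkeeping is needed wherever clusters are added. Below is a minimal, self-contained sketch of that pattern, assuming hashicorp/golang-lru/v2 and the Prometheus Go client; the metric name and value types are illustrative, not Loki's actual wiring.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2/simplelru"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	// Counter tracking evictions, analogous in spirit to patterns_evicted_total.
	evicted := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_clusters_evicted_total",
		Help: "Entries evicted from the LRU cache.",
	})

	// The eviction callback fires whenever the LRU drops an entry, so the
	// Add call sites never touch the counter directly.
	cache, err := lru.NewLRU[int, string](2, func(_ int, _ string) {
		evicted.Inc()
	})
	if err != nil {
		panic(err)
	}

	for i := 0; i < 5; i++ {
		cache.Add(i, fmt.Sprintf("cluster-%d", i))
	}

	// Capacity 2 with 5 inserts leaves 2 resident entries and 3 evictions.
	fmt.Println("evicted:", testutil.ToFloat64(evicted)) // 3
	fmt.Println("resident:", cache.Len())                // 2
}

Because the callback lives inside the cache itself, any future code path that trains new clusters picks up the eviction metric for free, which is the property the drain.go change above relies on.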
diff --git a/pkg/pattern/drain/drain_test.go b/pkg/pattern/drain/drain_test.go index 72b80aeb67d34..e9709aed3fec4 100644 --- a/pkg/pattern/drain/drain_test.go +++ b/pkg/pattern/drain/drain_test.go @@ -20,9 +20,9 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { }{ { // High variation leads to many patterns including some that are too generic (many tokens matched) and some that are too specific (too few matchers) - name: "Generate patterns on high variation logfmt logs", - drain: New(DefaultConfig()), - inputFile: "testdata/agent-logfmt.txt", + name: `Generate patterns on high variation logfmt logs`, + drain: New(DefaultConfig(), nil), + inputFile: `testdata/agent-logfmt.txt`, patterns: []string{ "ts=2024-04-16T15:10:43.192290389Z caller=filetargetmanager.go:361 level=info component=logs logs_config=default msg=\"Adding target\" key=\"/var/log/pods/*19a1cce8-5f04-46e0-a124-292b0dd9b343/testcoordinator/*.log:{batch_kubernetes_io_controller_uid=\\\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\\\", batch_kubernetes_io_job_name=\\\"testcoordinator-job-2665838\\\", container=\\\"testcoordinator\\\", controller_uid=\\\"25ec5edf-f78e-468b-b6f3-3b9685f0cc8f\\\", job=\\\"k6-cloud/testcoordinator\\\", job_name=\\\"testcoordinator-job-2665838\\\", name=\\\"testcoordinator\\\", namespace=\\\"k6-cloud\\\", pod=\\\"testcoordinator-job-2665838-9g8ds\\\"}\"", "<_> <_> level=info component=logs logs_config=default <_> target\" <_> <_> <_> <_> <_> <_>", @@ -42,9 +42,9 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { }, { // Lower variation leads to fewer patterns including some with limited value (single lines, no matchers) - name: "Generate patterns on low variation logfmt logs", - drain: New(DefaultConfig()), - inputFile: "testdata/ingester-logfmt.txt", + name: `Generate patterns on low variation logfmt logs`, + drain: New(DefaultConfig(), nil), + inputFile: `testdata/ingester-logfmt.txt`, patterns: []string{ "<_> caller=head.go:216 level=debug tenant=987678 msg=\"profile is empty after delta computation\" metricName=memory", "ts=2024-04-17T09:52:46.363974185Z caller=http.go:194 level=debug traceID=1b48f5156a61ca69 msg=\"GET /debug/pprof/delta_mutex (200) 1.161082ms\"", @@ -53,9 +53,9 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { }, { // Lower variation logs in json leads to a high number of patterns with very few matchers - name: "Generate patterns on json formatted logs", - drain: New(DefaultConfig()), - inputFile: "testdata/drone-json.txt", + name: `Generate patterns on json formatted logs`, + drain: New(DefaultConfig(), nil), + inputFile: `testdata/drone-json.txt`, patterns: []string{ "<_> capacity <_>", "<_> capacity changes <_>", @@ -96,7 +96,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { }, { name: "Patterns for distributor logs", - drain: New(DefaultConfig()), + drain: New(DefaultConfig(), nil), inputFile: "testdata/distributor-logfmt.txt", patterns: []string{ `<_> caller=http.go:194 level=debug <_> <_> msg="POST <_> <_> <_>`, @@ -104,7 +104,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { }, { name: "Patterns for journald logs", - drain: New(DefaultConfig()), + drain: New(DefaultConfig(), nil), inputFile: "testdata/journald.txt", patterns: []string{ "2024-05-07T11:59:43.484606Z INFO ExtHandler ExtHandler Downloading agent manifest", @@ -195,7 +195,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { }, { name: "Patterns for kafka logs", - drain: New(DefaultConfig()), + drain: New(DefaultConfig(), nil), inputFile: "testdata/kafka.txt", patterns: []string{ 
`[2024-05-07 <_> INFO [LocalLog partition=mimir-dev-09-aggregations-offsets-0, dir=/bitnami/kafka/data] Deleting segment files <_> size=948, <_> <_> (kafka.log.LocalLog$)`, @@ -219,7 +219,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { }, { name: "Patterns for kubernetes logs", - drain: New(DefaultConfig()), + drain: New(DefaultConfig(), nil), inputFile: "testdata/kubernetes.txt", patterns: []string{ "I0507 12:04:17.596484 1 highnodeutilization.go:107] \"Criteria for a node below target utilization\" CPU=50 Mem=50 Pods=100", @@ -252,7 +252,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { }, { name: "Patterns for vault logs", - drain: New(DefaultConfig()), + drain: New(DefaultConfig(), nil), inputFile: "testdata/vault.txt", patterns: []string{ "<_> [INFO] expiration: revoked lease: <_>", @@ -260,7 +260,7 @@ func TestDrain_TrainExtractsPatterns(t *testing.T) { }, { name: "Patterns for calico logs", - drain: New(DefaultConfig()), + drain: New(DefaultConfig(), nil), inputFile: "testdata/calico.txt", patterns: []string{ `2024-05-08 <_> [DEBUG][216945] felix/table.go 870: Found forward-reference <_> ipVersion=0x4 <_> <_> [0:0]" table="nat"`, @@ -383,8 +383,8 @@ func TestDrain_TrainGeneratesMatchablePatterns(t *testing.T) { inputLines []string }{ { - name: `should match each line against a pattern`, - drain: New(DefaultConfig()), + name: "should match each line against a pattern", + drain: New(DefaultConfig(), nil), inputLines: []string{ "test test test", "test test test", @@ -393,8 +393,8 @@ func TestDrain_TrainGeneratesMatchablePatterns(t *testing.T) { }, }, { - name: `should also match newlines`, - drain: New(DefaultConfig()), + name: "should also match newlines", + drain: New(DefaultConfig(), nil), inputLines: []string{ `test test test `, @@ -413,7 +413,6 @@ func TestDrain_TrainGeneratesMatchablePatterns(t *testing.T) { for _, line := range tt.inputLines { tt.drain.Train(line, 0) } - t.Log(`Learned clusters`, tt.drain.Clusters()) for _, line := range tt.inputLines { match := tt.drain.Match(line) @@ -432,8 +431,8 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) inputLines []string }{ { - name: `should extract patterns that all lines match`, - drain: New(DefaultConfig()), + name: "should extract patterns that all lines match", + drain: New(DefaultConfig(), nil), inputLines: []string{ "test 1 test", "test 2 test", @@ -442,8 +441,8 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) }, }, { - name: `should extract patterns that match if line ends with newlines`, - drain: New(DefaultConfig()), + name: "should extract patterns that match if line ends with newlines", + drain: New(DefaultConfig(), nil), inputLines: []string{ `test 1 test `, @@ -456,8 +455,8 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) }, }, { - name: `should extract patterns that match if line ends with empty space`, - drain: New(DefaultConfig()), + name: "should extract patterns that match if line ends with empty space", + drain: New(DefaultConfig(), nil), inputLines: []string{ `test 1 test `, `test 2 test `, @@ -466,8 +465,8 @@ func TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) }, }, { - name: `should extract patterns that match if line starts with empty space`, - drain: New(DefaultConfig()), + name: "should extract patterns that match if line starts with empty space", + drain: New(DefaultConfig(), nil), inputLines: []string{ ` test 1 test`, ` test 2 test`, @@ -484,7 +483,6 @@ func 
TestDrain_TrainGeneratesPatternsMatchableByLokiPatternFilter(t *testing.T) } require.Equal(t, 1, len(tt.drain.Clusters())) cluster := tt.drain.Clusters()[0] - t.Log(`Extracted cluster: `, cluster) matcher, err := pattern.ParseLineFilter([]byte(cluster.String())) require.NoError(t, err) diff --git a/pkg/pattern/drain/metrics.go b/pkg/pattern/drain/metrics.go new file mode 100644 index 0000000000000..b09ef12301271 --- /dev/null +++ b/pkg/pattern/drain/metrics.go @@ -0,0 +1,8 @@ +package drain + +import "github.com/prometheus/client_golang/prometheus" + +type Metrics struct { + PatternsEvictedTotal prometheus.Counter + PatternsDetectedTotal prometheus.Counter +} diff --git a/pkg/pattern/ingester.go b/pkg/pattern/ingester.go index af2e842c28b83..1cb91a1cda299 100644 --- a/pkg/pattern/ingester.go +++ b/pkg/pattern/ingester.go @@ -273,7 +273,7 @@ func (i *Ingester) GetOrCreateInstance(instanceID string) (*instance, error) { / inst, ok = i.instances[instanceID] if !ok { var err error - inst, err = newInstance(instanceID, i.logger) + inst, err = newInstance(instanceID, i.logger, i.metrics) if err != nil { return nil, err } diff --git a/pkg/pattern/ingester_querier.go b/pkg/pattern/ingester_querier.go index bfbaeb92aedbc..13315b0a13f1a 100644 --- a/pkg/pattern/ingester_querier.go +++ b/pkg/pattern/ingester_querier.go @@ -67,7 +67,7 @@ func (q *IngesterQuerier) Patterns(ctx context.Context, req *logproto.QueryPatte } func prunePatterns(resp *logproto.QueryPatternsResponse, minClusterSize int) *logproto.QueryPatternsResponse { - d := drain.New(drain.DefaultConfig()) + d := drain.New(drain.DefaultConfig(), nil) for _, p := range resp.Series { d.TrainPattern(p.Pattern, p.Samples) } diff --git a/pkg/pattern/ingester_test.go b/pkg/pattern/ingester_test.go index 16d5d0f04189f..eff054b9ec041 100644 --- a/pkg/pattern/ingester_test.go +++ b/pkg/pattern/ingester_test.go @@ -18,7 +18,7 @@ import ( func TestInstancePushQuery(t *testing.T) { lbs := labels.New(labels.Label{Name: "test", Value: "test"}) - inst, err := newInstance("foo", log.NewNopLogger()) + inst, err := newInstance("foo", log.NewNopLogger(), newIngesterMetrics(nil, "test")) require.NoError(t, err) err = inst.Push(context.Background(), &push.PushRequest{ diff --git a/pkg/pattern/instance.go b/pkg/pattern/instance.go index 7ac0099edec36..f6efa7de04435 100644 --- a/pkg/pattern/instance.go +++ b/pkg/pattern/instance.go @@ -30,9 +30,10 @@ type instance struct { streams *streamsMap index *index.BitPrefixInvertedIndex logger log.Logger + metrics *ingesterMetrics } -func newInstance(instanceID string, logger log.Logger) (*instance, error) { +func newInstance(instanceID string, logger log.Logger, metrics *ingesterMetrics) (*instance, error) { index, err := index.NewBitPrefixWithShards(indexShards) if err != nil { return nil, err @@ -43,6 +44,7 @@ func newInstance(instanceID string, logger log.Logger) (*instance, error) { instanceID: instanceID, streams: newStreamsMap(), index: index, + metrics: metrics, } i.mapper = ingester.NewFPMapper(i.getLabelsFromFingerprint) return i, nil @@ -138,7 +140,7 @@ func (i *instance) createStream(_ context.Context, pushReqStream logproto.Stream } fp := i.getHashForLabels(labels) sortedLabels := i.index.Add(logproto.FromLabelsToLabelAdapters(labels), fp) - s, err := newStream(fp, sortedLabels) + s, err := newStream(fp, sortedLabels, i.metrics) if err != nil { return nil, fmt.Errorf("failed to create stream: %w", err) } diff --git a/pkg/pattern/metrics.go b/pkg/pattern/metrics.go index e4a9c146c36f6..cb814d56905a8 100644 --- 
a/pkg/pattern/metrics.go +++ b/pkg/pattern/metrics.go @@ -6,7 +6,9 @@ import ( ) type ingesterMetrics struct { - flushQueueLength prometheus.Gauge + flushQueueLength prometheus.Gauge + patternsDiscardedTotal prometheus.Counter + patternsDetectedTotal prometheus.Counter } func newIngesterMetrics(r prometheus.Registerer, metricsNamespace string) *ingesterMetrics { @@ -17,5 +19,17 @@ func newIngesterMetrics(r prometheus.Registerer, metricsNamespace string) *inges Name: "flush_queue_length", Help: "The total number of series pending in the flush queue.", }), + patternsDiscardedTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Namespace: metricsNamespace, + Subsystem: "pattern_ingester", + Name: "patterns_evicted_total", + Help: "The total number of patterns evicted from the LRU cache.", + }), + patternsDetectedTotal: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Namespace: metricsNamespace, + Subsystem: "pattern_ingester", + Name: "patterns_detected_total", + Help: "The total number of patterns detected from incoming log lines.", + }), } } diff --git a/pkg/pattern/stream.go b/pkg/pattern/stream.go index 8321fce9f647c..f3aad280250db 100644 --- a/pkg/pattern/stream.go +++ b/pkg/pattern/stream.go @@ -27,13 +27,17 @@ type stream struct { func newStream( fp model.Fingerprint, labels labels.Labels, + metrics *ingesterMetrics, ) (*stream, error) { return &stream{ fp: fp, labels: labels, labelsString: labels.String(), labelHash: labels.Hash(), - patterns: drain.New(drain.DefaultConfig()), + patterns: drain.New(drain.DefaultConfig(), &drain.Metrics{ + PatternsEvictedTotal: metrics.patternsDiscardedTotal, + PatternsDetectedTotal: metrics.patternsDetectedTotal, + }), }, nil } diff --git a/pkg/pattern/stream_test.go b/pkg/pattern/stream_test.go index cd76336b2e600..f2218816b1113 100644 --- a/pkg/pattern/stream_test.go +++ b/pkg/pattern/stream_test.go @@ -16,7 +16,7 @@ import ( func TestAddStream(t *testing.T) { lbs := labels.New(labels.Label{Name: "test", Value: "test"}) - stream, err := newStream(model.Fingerprint(lbs.Hash()), lbs) + stream, err := newStream(model.Fingerprint(lbs.Hash()), lbs, newIngesterMetrics(nil, "test")) require.NoError(t, err) err = stream.Push(context.Background(), []push.Entry{ @@ -44,7 +44,7 @@ func TestAddStream(t *testing.T) { func TestPruneStream(t *testing.T) { lbs := labels.New(labels.Label{Name: "test", Value: "test"}) - stream, err := newStream(model.Fingerprint(lbs.Hash()), lbs) + stream, err := newStream(model.Fingerprint(lbs.Hash()), lbs, newIngesterMetrics(nil, "test")) require.NoError(t, err) err = stream.Push(context.Background(), []push.Entry{ From e7fdeb974aff62c5775b9f98ebb2228000b28c8d Mon Sep 17 00:00:00 2001 From: Shantanu Alshi Date: Wed, 15 May 2024 21:26:14 +0530 Subject: [PATCH 31/47] perf: Improve Detected labels API (#12816) --- pkg/ingester/ingester.go | 2 +- pkg/ingester/ingester_test.go | 8 +- pkg/loghttp/labels.go | 11 +- pkg/logproto/logproto.pb.go | 480 +++++++++++--------- pkg/logproto/logproto.proto | 5 +- pkg/querier/querier.go | 115 ++--- pkg/querier/querier_test.go | 201 ++++---- pkg/querier/queryrange/codec.go | 84 +++- pkg/querier/queryrange/roundtrip.go | 51 ++- pkg/querier/queryrange/split_by_interval.go | 2 +- pkg/querier/queryrange/splitters.go | 16 + pkg/querier/queryrange/stats.go | 4 + pkg/storage/detected/labels.go | 64 +++ 13 files changed, 623 insertions(+), 420 deletions(-) create mode 100644 pkg/storage/detected/labels.go diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 
ac6a29e81d43f..640c64eee6b63 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -1397,7 +1397,7 @@ func (i *Ingester) GetDetectedLabels(ctx context.Context, req *logproto.Detected } } - labelMap, err := instance.LabelsWithValues(ctx, *req.Start, matchers...) + labelMap, err := instance.LabelsWithValues(ctx, req.Start, matchers...) if err != nil { return nil, err diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 378123709a067..b31053a5ded17 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -833,8 +833,8 @@ func TestIngester_GetDetectedLabels(t *testing.T) { require.NoError(t, err) res, err := i.GetDetectedLabels(ctx, &logproto.DetectedLabelsRequest{ - Start: &[]time.Time{time.Now().Add(11 * time.Nanosecond)}[0], - End: nil, + Start: []time.Time{time.Now().Add(11 * time.Nanosecond)}[0], + End: []time.Time{time.Now().Add(12 * time.Nanosecond)}[0], Query: "", }) @@ -893,8 +893,8 @@ func TestIngester_GetDetectedLabelsWithQuery(t *testing.T) { require.NoError(t, err) res, err := i.GetDetectedLabels(ctx, &logproto.DetectedLabelsRequest{ - Start: &[]time.Time{time.Now().Add(11 * time.Nanosecond)}[0], - End: nil, + Start: []time.Time{time.Now().Add(11 * time.Nanosecond)}[0], + End: []time.Time{time.Now().Add(11 * time.Nanosecond)}[0], Query: `{foo="bar"}`, }) diff --git a/pkg/loghttp/labels.go b/pkg/loghttp/labels.go index b2c5a343637be..360c750048a5e 100644 --- a/pkg/loghttp/labels.go +++ b/pkg/loghttp/labels.go @@ -1,6 +1,7 @@ package loghttp import ( + "errors" "net/http" "sort" "strconv" @@ -88,14 +89,20 @@ func ParseLabelQuery(r *http.Request) (*logproto.LabelRequest, error) { } func ParseDetectedLabelsQuery(r *http.Request) (*logproto.DetectedLabelsRequest, error) { + var err error + start, end, err := bounds(r) if err != nil { return nil, err } + if end.Before(start) { + return nil, errors.New("end timestamp must not be before or equal to start time") + } + return &logproto.DetectedLabelsRequest{ - Start: &start, - End: &end, + Start: start, + End: end, Query: query(r), }, nil } diff --git a/pkg/logproto/logproto.pb.go b/pkg/logproto/logproto.pb.go index 5ba5e49c1060b..0d0827f5a094c 100644 --- a/pkg/logproto/logproto.pb.go +++ b/pkg/logproto/logproto.pb.go @@ -2890,9 +2890,9 @@ func (m *DetectedField) GetSketch() []byte { } type DetectedLabelsRequest struct { - Start *time.Time `protobuf:"bytes,1,opt,name=start,proto3,stdtime" json:"start,omitempty"` - End *time.Time `protobuf:"bytes,2,opt,name=end,proto3,stdtime" json:"end,omitempty"` - Query string `protobuf:"bytes,3,opt,name=query,proto3" json:"query,omitempty"` + Start time.Time `protobuf:"bytes,1,opt,name=start,proto3,stdtime" json:"start"` + End time.Time `protobuf:"bytes,2,opt,name=end,proto3,stdtime" json:"end"` + Query string `protobuf:"bytes,3,opt,name=query,proto3" json:"query,omitempty"` } func (m *DetectedLabelsRequest) Reset() { *m = DetectedLabelsRequest{} } @@ -2927,18 +2927,18 @@ func (m *DetectedLabelsRequest) XXX_DiscardUnknown() { var xxx_messageInfo_DetectedLabelsRequest proto.InternalMessageInfo -func (m *DetectedLabelsRequest) GetStart() *time.Time { +func (m *DetectedLabelsRequest) GetStart() time.Time { if m != nil { return m.Start } - return nil + return time.Time{} } -func (m *DetectedLabelsRequest) GetEnd() *time.Time { +func (m *DetectedLabelsRequest) GetEnd() time.Time { if m != nil { return m.End } - return nil + return time.Time{} } func (m *DetectedLabelsRequest) GetQuery() string { @@ -2994,6 +2994,7 @@ func (m 
*DetectedLabelsResponse) GetDetectedLabels() []*DetectedLabel { type DetectedLabel struct { Label string `protobuf:"bytes,1,opt,name=label,proto3" json:"label,omitempty"` Cardinality uint64 `protobuf:"varint,2,opt,name=cardinality,proto3" json:"cardinality,omitempty"` + Sketch []byte `protobuf:"bytes,3,opt,name=sketch,proto3" json:"sketch,omitempty"` } func (m *DetectedLabel) Reset() { *m = DetectedLabel{} } @@ -3042,6 +3043,13 @@ func (m *DetectedLabel) GetCardinality() uint64 { return 0 } +func (m *DetectedLabel) GetSketch() []byte { + if m != nil { + return m.Sketch + } + return nil +} + func init() { proto.RegisterEnum("logproto.Direction", Direction_name, Direction_value) proto.RegisterType((*LabelToValuesResponse)(nil), "logproto.LabelToValuesResponse") @@ -3105,174 +3113,174 @@ func init() { func init() { proto.RegisterFile("pkg/logproto/logproto.proto", fileDescriptor_c28a5f14f1f4c79a) } var fileDescriptor_c28a5f14f1f4c79a = []byte{ - // 2670 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0x4d, 0x8c, 0x5b, 0x47, - 0xd9, 0xcf, 0x7e, 0xfe, 0xfb, 0xec, 0xdd, 0x6c, 0x66, 0x9d, 0xc4, 0xda, 0xa4, 0x7e, 0xdb, 0x11, - 0xb4, 0xa1, 0x49, 0xd7, 0x4d, 0x4a, 0x4b, 0x9a, 0x52, 0x4a, 0xbc, 0xdb, 0x6c, 0x93, 0x6e, 0xd3, - 0x74, 0x36, 0x4d, 0x0b, 0xa2, 0xaa, 0x5e, 0xec, 0x59, 0xef, 0x53, 0xec, 0xf7, 0x9c, 0xf7, 0xc6, - 0x4d, 0xf7, 0x86, 0xc4, 0x19, 0x51, 0x89, 0x03, 0x70, 0x41, 0x42, 0x42, 0x02, 0x81, 0x7a, 0x41, - 0x9c, 0x10, 0x82, 0x0b, 0x87, 0x72, 0x2b, 0xb7, 0xaa, 0x07, 0x43, 0xb7, 0x17, 0xb4, 0xa7, 0x4a, - 0x48, 0x1c, 0x7a, 0x42, 0xf3, 0xf7, 0xde, 0xbc, 0xb7, 0x5e, 0x52, 0x6f, 0x83, 0x4a, 0x2e, 0xf6, - 0xcc, 0x37, 0xdf, 0x7c, 0x33, 0xdf, 0xcf, 0x7c, 0x7f, 0x36, 0x9c, 0x1c, 0xdd, 0xee, 0xb7, 0x07, - 0x41, 0x7f, 0x14, 0x06, 0x2c, 0x88, 0x07, 0x2b, 0xe2, 0x13, 0x55, 0xf4, 0x7c, 0xa9, 0xd1, 0x0f, - 0xfa, 0x81, 0xc4, 0xe1, 0x23, 0xb9, 0xbe, 0xe4, 0xf4, 0x83, 0xa0, 0x3f, 0xa0, 0x6d, 0x31, 0xbb, - 0x35, 0xde, 0x6a, 0x33, 0x6f, 0x48, 0x23, 0xe6, 0x0e, 0x47, 0x0a, 0x61, 0x59, 0x51, 0xbf, 0x33, - 0x18, 0x06, 0x3d, 0x3a, 0x68, 0x47, 0xcc, 0x65, 0x91, 0xfc, 0x54, 0x18, 0x8b, 0x1c, 0x63, 0x34, - 0x8e, 0xb6, 0xc5, 0x87, 0x04, 0xe2, 0xdf, 0x5b, 0x70, 0x6c, 0xc3, 0xbd, 0x45, 0x07, 0x37, 0x82, - 0x9b, 0xee, 0x60, 0x4c, 0x23, 0x42, 0xa3, 0x51, 0xe0, 0x47, 0x14, 0xad, 0x42, 0x69, 0xc0, 0x17, - 0xa2, 0xa6, 0xb5, 0x5c, 0x38, 0x5d, 0x3b, 0x7f, 0x66, 0x25, 0xbe, 0xf2, 0xd4, 0x0d, 0x12, 0x1a, - 0xbd, 0xe0, 0xb3, 0x70, 0x87, 0xa8, 0xad, 0x4b, 0x37, 0xa1, 0x66, 0x80, 0xd1, 0x02, 0x14, 0x6e, - 0xd3, 0x9d, 0xa6, 0xb5, 0x6c, 0x9d, 0xae, 0x12, 0x3e, 0x44, 0xe7, 0xa0, 0xf8, 0x36, 0x27, 0xd3, - 0xcc, 0x2f, 0x5b, 0xa7, 0x6b, 0xe7, 0x4f, 0x26, 0x87, 0xbc, 0xe6, 0x7b, 0x77, 0xc6, 0x54, 0xec, - 0x56, 0x07, 0x49, 0xcc, 0x8b, 0xf9, 0x0b, 0x16, 0x3e, 0x03, 0x47, 0xf7, 0xad, 0xa3, 0xe3, 0x50, - 0x12, 0x18, 0xf2, 0xc6, 0x55, 0xa2, 0x66, 0xb8, 0x01, 0x68, 0x93, 0x85, 0xd4, 0x1d, 0x12, 0x97, - 0xf1, 0xfb, 0xde, 0x19, 0xd3, 0x88, 0xe1, 0x97, 0x61, 0x31, 0x05, 0x55, 0x6c, 0x3f, 0x0d, 0xb5, - 0x28, 0x01, 0x2b, 0xde, 0x1b, 0xc9, 0xb5, 0x92, 0x3d, 0xc4, 0x44, 0xc4, 0x3f, 0xb7, 0x00, 0x92, - 0x35, 0xd4, 0x02, 0x90, 0xab, 0x2f, 0xba, 0xd1, 0xb6, 0x60, 0xd8, 0x26, 0x06, 0x04, 0x9d, 0x85, - 0xa3, 0xc9, 0xec, 0x5a, 0xb0, 0xb9, 0xed, 0x86, 0x3d, 0x21, 0x03, 0x9b, 0xec, 0x5f, 0x40, 0x08, - 0xec, 0xd0, 0x65, 0xb4, 0x59, 0x58, 0xb6, 0x4e, 0x17, 0x88, 0x18, 0x73, 0x6e, 0x19, 0xf5, 0x5d, - 0x9f, 0x35, 0x6d, 0x21, 0x4e, 0x35, 0xe3, 0x70, 0xae, 0x5f, 0x1a, 0x35, 0x8b, 0xcb, 0xd6, 0xe9, - 
0x39, 0xa2, 0x66, 0xf8, 0xdf, 0x05, 0xa8, 0xbf, 0x3a, 0xa6, 0xe1, 0x8e, 0x12, 0x00, 0x6a, 0x41, - 0x25, 0xa2, 0x03, 0xda, 0x65, 0x41, 0x28, 0x35, 0xd2, 0xc9, 0x37, 0x2d, 0x12, 0xc3, 0x50, 0x03, - 0x8a, 0x03, 0x6f, 0xe8, 0x31, 0x71, 0xad, 0x39, 0x22, 0x27, 0xe8, 0x22, 0x14, 0x23, 0xe6, 0x86, - 0x4c, 0xdc, 0xa5, 0x76, 0x7e, 0x69, 0x45, 0x1a, 0xe6, 0x8a, 0x36, 0xcc, 0x95, 0x1b, 0xda, 0x30, - 0x3b, 0x95, 0xf7, 0x27, 0x4e, 0xee, 0xdd, 0xbf, 0x3b, 0x16, 0x91, 0x5b, 0xd0, 0xd3, 0x50, 0xa0, - 0x7e, 0x4f, 0xdc, 0xf7, 0xf3, 0xee, 0xe4, 0x1b, 0xd0, 0x39, 0xa8, 0xf6, 0xbc, 0x90, 0x76, 0x99, - 0x17, 0xf8, 0x82, 0xab, 0xf9, 0xf3, 0x8b, 0x89, 0x46, 0xd6, 0xf4, 0x12, 0x49, 0xb0, 0xd0, 0x59, - 0x28, 0x45, 0x5c, 0x74, 0x51, 0xb3, 0xcc, 0x6d, 0xa1, 0xd3, 0xd8, 0x9b, 0x38, 0x0b, 0x12, 0x72, - 0x36, 0x18, 0x7a, 0x8c, 0x0e, 0x47, 0x6c, 0x87, 0x28, 0x1c, 0xf4, 0x18, 0x94, 0x7b, 0x74, 0x40, - 0xb9, 0xc2, 0x2b, 0x42, 0xe1, 0x0b, 0x06, 0x79, 0xb1, 0x40, 0x34, 0x02, 0x7a, 0x13, 0xec, 0xd1, - 0xc0, 0xf5, 0x9b, 0x55, 0xc1, 0xc5, 0x7c, 0x82, 0x78, 0x7d, 0xe0, 0xfa, 0x9d, 0x67, 0x3e, 0x9a, - 0x38, 0x4f, 0xf5, 0x3d, 0xb6, 0x3d, 0xbe, 0xb5, 0xd2, 0x0d, 0x86, 0xed, 0x7e, 0xe8, 0x6e, 0xb9, - 0xbe, 0xdb, 0x1e, 0x04, 0xb7, 0xbd, 0xf6, 0xdb, 0x4f, 0xb6, 0xf9, 0x1b, 0xbc, 0x33, 0xa6, 0xa1, - 0x47, 0xc3, 0x36, 0x27, 0xb3, 0x22, 0x54, 0xc2, 0xb7, 0x12, 0x41, 0x16, 0x5d, 0xe5, 0xf6, 0x17, - 0x84, 0x74, 0x75, 0x7b, 0xec, 0xdf, 0x8e, 0x9a, 0x20, 0x4e, 0x39, 0x91, 0x9c, 0x22, 0xe0, 0x84, - 0x6e, 0xad, 0x87, 0xc1, 0x78, 0xd4, 0x39, 0xb2, 0x37, 0x71, 0x4c, 0x7c, 0x62, 0x4e, 0xae, 0xda, - 0x95, 0xd2, 0x42, 0x19, 0xbf, 0x57, 0x00, 0xb4, 0xe9, 0x0e, 0x47, 0x03, 0x3a, 0x93, 0xfa, 0x63, - 0x45, 0xe7, 0x0f, 0xad, 0xe8, 0xc2, 0xac, 0x8a, 0x4e, 0xb4, 0x66, 0xcf, 0xa6, 0xb5, 0xe2, 0xe7, - 0xd5, 0x5a, 0xe9, 0xff, 0x5e, 0x6b, 0xb8, 0x09, 0x36, 0xa7, 0xcc, 0x9d, 0x65, 0xe8, 0xde, 0x15, - 0xba, 0xa9, 0x13, 0x3e, 0xc4, 0x1b, 0x50, 0x92, 0x7c, 0xa1, 0xa5, 0xac, 0xf2, 0xd2, 0xef, 0x36, - 0x51, 0x5c, 0x41, 0xab, 0x64, 0x21, 0x51, 0x49, 0x41, 0x08, 0x1b, 0xff, 0xd1, 0x82, 0x39, 0x65, - 0x11, 0xca, 0xf7, 0xdd, 0x82, 0xb2, 0xf4, 0x3d, 0xda, 0xef, 0x9d, 0xc8, 0xfa, 0xbd, 0x4b, 0x3d, - 0x77, 0xc4, 0x68, 0xd8, 0x69, 0xbf, 0x3f, 0x71, 0xac, 0x8f, 0x26, 0xce, 0xa3, 0x07, 0x09, 0x4d, - 0xc7, 0x1a, 0xed, 0x2f, 0x35, 0x61, 0x74, 0x46, 0xdc, 0x8e, 0x45, 0xca, 0xac, 0x8e, 0xac, 0xc8, - 0x10, 0x75, 0xc5, 0xef, 0xd3, 0x88, 0x53, 0xb6, 0xb9, 0x45, 0x10, 0x89, 0xc3, 0xd9, 0xbc, 0xeb, - 0x86, 0xbe, 0xe7, 0xf7, 0xa3, 0x66, 0x41, 0xf8, 0xf4, 0x78, 0x8e, 0x7f, 0x6a, 0xc1, 0x62, 0xca, - 0xac, 0x15, 0x13, 0x17, 0xa0, 0x14, 0x71, 0x4d, 0x69, 0x1e, 0x0c, 0xa3, 0xd8, 0x14, 0xf0, 0xce, - 0xbc, 0xba, 0x7c, 0x49, 0xce, 0x89, 0xc2, 0xbf, 0x7f, 0x57, 0xfb, 0x8b, 0x05, 0x75, 0x11, 0x98, - 0xf4, 0x5b, 0x43, 0x60, 0xfb, 0xee, 0x90, 0x2a, 0x55, 0x89, 0xb1, 0x11, 0xad, 0xf8, 0x71, 0x15, - 0x1d, 0xad, 0x66, 0x75, 0xb0, 0xd6, 0xa1, 0x1d, 0xac, 0x95, 0xbc, 0xbb, 0x06, 0x14, 0xb9, 0x79, - 0xef, 0x08, 0xe7, 0x5a, 0x25, 0x72, 0x82, 0x1f, 0x85, 0x39, 0xc5, 0x85, 0x12, 0xed, 0x41, 0x01, - 0x76, 0x08, 0x25, 0xa9, 0x09, 0xf4, 0x15, 0xa8, 0xc6, 0x89, 0x89, 0xe0, 0xb6, 0xd0, 0x29, 0xed, - 0x4d, 0x9c, 0x3c, 0x8b, 0x48, 0xb2, 0x80, 0x1c, 0x33, 0xe8, 0x5b, 0x9d, 0xea, 0xde, 0xc4, 0x91, - 0x00, 0x15, 0xe2, 0xd1, 0x29, 0xb0, 0xb7, 0x79, 0xdc, 0xe4, 0x22, 0xb0, 0x3b, 0x95, 0xbd, 0x89, - 0x23, 0xe6, 0x44, 0x7c, 0xe2, 0x75, 0xa8, 0x6f, 0xd0, 0xbe, 0xdb, 0xdd, 0x51, 0x87, 0x36, 0x34, - 0x39, 0x7e, 0xa0, 0xa5, 0x69, 0x3c, 0x0c, 0xf5, 0xf8, 0xc4, 0xb7, 0x86, 0x91, 0x7a, 0x0d, 0xb5, - 0x18, 0xf6, 0x72, 0x84, 
0x7f, 0x66, 0x81, 0xb2, 0x01, 0x84, 0x8d, 0x6c, 0x87, 0xfb, 0x42, 0xd8, - 0x9b, 0x38, 0x0a, 0xa2, 0x93, 0x19, 0xf4, 0x2c, 0x94, 0x23, 0x71, 0x22, 0x27, 0x96, 0x35, 0x2d, - 0xb1, 0xd0, 0x39, 0xc2, 0x4d, 0x64, 0x6f, 0xe2, 0x68, 0x44, 0xa2, 0x07, 0x68, 0x25, 0x95, 0x10, - 0x48, 0xc6, 0xe6, 0xf7, 0x26, 0x8e, 0x01, 0x35, 0x13, 0x04, 0xfc, 0x99, 0x05, 0xb5, 0x1b, 0xae, - 0x17, 0x9b, 0x50, 0x53, 0xab, 0x28, 0xf1, 0xd5, 0x12, 0xc0, 0x2d, 0xb1, 0x47, 0x07, 0xee, 0xce, - 0xe5, 0x20, 0x14, 0x74, 0xe7, 0x48, 0x3c, 0x4f, 0x62, 0xb8, 0x3d, 0x35, 0x86, 0x17, 0x67, 0x77, - 0xed, 0xff, 0x5b, 0x47, 0x7a, 0xd5, 0xae, 0xe4, 0x17, 0x0a, 0xf8, 0x3d, 0x0b, 0xea, 0x92, 0x79, - 0x65, 0x79, 0xdf, 0x83, 0x92, 0x94, 0x8d, 0x60, 0xff, 0xbf, 0x38, 0xa6, 0x33, 0xb3, 0x38, 0x25, - 0x45, 0x13, 0x3d, 0x0f, 0xf3, 0xbd, 0x30, 0x18, 0x8d, 0x68, 0x6f, 0x53, 0xb9, 0xbf, 0x7c, 0xd6, - 0xfd, 0xad, 0x99, 0xeb, 0x24, 0x83, 0x8e, 0xff, 0x6a, 0xc1, 0x9c, 0x72, 0x26, 0x4a, 0x5d, 0xb1, - 0x88, 0xad, 0x43, 0x47, 0xcf, 0xfc, 0xac, 0xd1, 0xf3, 0x38, 0x94, 0xfa, 0x3c, 0xbe, 0x68, 0x87, - 0xa4, 0x66, 0xb3, 0x45, 0x55, 0x7c, 0x15, 0xe6, 0x35, 0x2b, 0x07, 0x78, 0xd4, 0xa5, 0xac, 0x47, - 0xbd, 0xd2, 0xa3, 0x3e, 0xf3, 0xb6, 0xbc, 0xd8, 0x47, 0x2a, 0x7c, 0xfc, 0x23, 0x0b, 0x16, 0xb2, - 0x28, 0x68, 0x2d, 0x53, 0x58, 0x3c, 0x72, 0x30, 0x39, 0xb3, 0xa6, 0xd0, 0xa4, 0x55, 0x65, 0xf1, - 0xd4, 0xbd, 0x2a, 0x8b, 0x86, 0xe9, 0x64, 0xaa, 0xca, 0x2b, 0xe0, 0x9f, 0x58, 0x30, 0x97, 0xd2, - 0x25, 0xba, 0x00, 0xf6, 0x56, 0x18, 0x0c, 0x67, 0x52, 0x94, 0xd8, 0x81, 0xbe, 0x0e, 0x79, 0x16, - 0xcc, 0xa4, 0xa6, 0x3c, 0x0b, 0xb8, 0x96, 0x14, 0xfb, 0x05, 0x99, 0xb7, 0xcb, 0x19, 0x7e, 0x0a, - 0xaa, 0x82, 0xa1, 0xeb, 0xae, 0x17, 0x4e, 0x0d, 0x18, 0xd3, 0x19, 0x7a, 0x16, 0x8e, 0x48, 0x67, - 0x38, 0x7d, 0x73, 0x7d, 0xda, 0xe6, 0xba, 0xde, 0x7c, 0x12, 0x8a, 0x22, 0xe9, 0xe0, 0x5b, 0x7a, - 0x2e, 0x73, 0xf5, 0x16, 0x3e, 0xc6, 0xc7, 0x60, 0x91, 0xbf, 0x41, 0x1a, 0x46, 0xab, 0xc1, 0xd8, - 0x67, 0xba, 0x6e, 0x3a, 0x0b, 0x8d, 0x34, 0x58, 0x59, 0x49, 0x03, 0x8a, 0x5d, 0x0e, 0x10, 0x34, - 0xe6, 0x88, 0x9c, 0xe0, 0x5f, 0x5a, 0x80, 0xd6, 0x29, 0x13, 0xa7, 0x5c, 0x59, 0x8b, 0x9f, 0xc7, - 0x12, 0x54, 0x86, 0x2e, 0xeb, 0x6e, 0xd3, 0x30, 0xd2, 0xf9, 0x8b, 0x9e, 0x7f, 0x19, 0x89, 0x27, - 0x3e, 0x07, 0x8b, 0xa9, 0x5b, 0x2a, 0x9e, 0x96, 0xa0, 0xd2, 0x55, 0x30, 0x15, 0xf2, 0xe2, 0x39, - 0xfe, 0x5d, 0x1e, 0x2a, 0x3a, 0xad, 0x43, 0xe7, 0xa0, 0xb6, 0xe5, 0xf9, 0x7d, 0x1a, 0x8e, 0x42, - 0x4f, 0x89, 0xc0, 0x96, 0x69, 0x9e, 0x01, 0x26, 0xe6, 0x04, 0x3d, 0x0e, 0xe5, 0x71, 0x44, 0xc3, - 0xb7, 0x3c, 0xf9, 0xd2, 0xab, 0x9d, 0xc6, 0xee, 0xc4, 0x29, 0xbd, 0x16, 0xd1, 0xf0, 0xca, 0x1a, - 0x0f, 0x3e, 0x63, 0x31, 0x22, 0xf2, 0xbb, 0x87, 0x5e, 0x52, 0x66, 0x2a, 0x12, 0xb8, 0xce, 0x37, - 0xf8, 0xf5, 0x33, 0xae, 0x6e, 0x14, 0x06, 0x43, 0xca, 0xb6, 0xe9, 0x38, 0x6a, 0x77, 0x83, 0xe1, - 0x30, 0xf0, 0xdb, 0xa2, 0x13, 0x20, 0x98, 0xe6, 0x11, 0x94, 0x6f, 0x57, 0x96, 0x7b, 0x03, 0xca, - 0x6c, 0x3b, 0x0c, 0xc6, 0xfd, 0x6d, 0x11, 0x18, 0x0a, 0x9d, 0x8b, 0xb3, 0xd3, 0xd3, 0x14, 0x88, - 0x1e, 0xa0, 0x87, 0xb9, 0xb4, 0x68, 0xf7, 0x76, 0x34, 0x1e, 0xca, 0xda, 0xb3, 0x53, 0xdc, 0x9b, - 0x38, 0xd6, 0xe3, 0x24, 0x06, 0xe3, 0x4b, 0x30, 0x97, 0x4a, 0x85, 0xd1, 0x13, 0x60, 0x87, 0x74, - 0x4b, 0xbb, 0x02, 0xb4, 0x3f, 0x63, 0x96, 0xd1, 0x9f, 0xe3, 0x10, 0xf1, 0x89, 0x7f, 0x98, 0x07, - 0xc7, 0xa8, 0xfa, 0x2f, 0x07, 0xe1, 0xcb, 0x94, 0x85, 0x5e, 0xf7, 0x9a, 0x3b, 0xa4, 0xda, 0xbc, - 0x1c, 0xa8, 0x0d, 0x05, 0xf0, 0x2d, 0xe3, 0x15, 0xc1, 0x30, 0xc6, 0x43, 0x0f, 0x01, 0x88, 0x67, - 0x27, 0xd7, 0xe5, 0x83, 0xaa, 0x0a, 0x88, 0x58, 
0x5e, 0x4d, 0x09, 0xbb, 0x3d, 0xa3, 0x70, 0x94, - 0x90, 0xaf, 0x64, 0x85, 0x3c, 0x33, 0x9d, 0x58, 0xb2, 0xe6, 0x73, 0x29, 0xa6, 0x9f, 0x0b, 0xfe, - 0x9b, 0x05, 0xad, 0x0d, 0x7d, 0xf3, 0x43, 0x8a, 0x43, 0xf3, 0x9b, 0xbf, 0x4f, 0xfc, 0x16, 0xbe, - 0x18, 0xbf, 0xb8, 0x05, 0xb0, 0xe1, 0xf9, 0xf4, 0xb2, 0x37, 0x60, 0x34, 0x9c, 0x52, 0x08, 0xfd, - 0xb8, 0x90, 0x78, 0x15, 0x42, 0xb7, 0x34, 0x9f, 0xab, 0x86, 0x2b, 0xbf, 0x1f, 0x6c, 0xe4, 0xef, - 0xa3, 0xda, 0x0a, 0x19, 0x2f, 0xe7, 0x43, 0x79, 0x4b, 0xb0, 0x27, 0xa3, 0x72, 0xaa, 0xc7, 0x94, - 0xf0, 0xde, 0xf9, 0x96, 0x3a, 0xfc, 0xe9, 0x7b, 0x24, 0x55, 0xa2, 0xf3, 0xd7, 0x8e, 0x76, 0x7c, - 0xe6, 0xbe, 0x63, 0xec, 0x27, 0xfa, 0x10, 0xe4, 0xaa, 0xbc, 0xad, 0x38, 0x35, 0x6f, 0x7b, 0x4e, - 0x1d, 0xf3, 0x45, 0x72, 0x37, 0xfc, 0x5c, 0xe2, 0x44, 0x85, 0x52, 0x94, 0x13, 0x7d, 0xe4, 0x5e, - 0x4f, 0x5c, 0x3d, 0xec, 0x3f, 0x59, 0xb0, 0xb0, 0x4e, 0x59, 0x3a, 0x8f, 0x7a, 0x80, 0x54, 0x8a, - 0x5f, 0x84, 0xa3, 0xc6, 0xfd, 0x15, 0xf7, 0x4f, 0x66, 0x92, 0xa7, 0x63, 0x09, 0xff, 0x57, 0xfc, - 0x1e, 0x7d, 0x47, 0xd5, 0xa4, 0xe9, 0xbc, 0xe9, 0x3a, 0xd4, 0x8c, 0x45, 0x74, 0x29, 0x93, 0x31, - 0x2d, 0x66, 0x5a, 0xb1, 0x3c, 0xea, 0x77, 0x1a, 0x8a, 0x27, 0x59, 0x79, 0xaa, 0x7c, 0x38, 0xce, - 0x2e, 0x36, 0x01, 0x09, 0x75, 0x09, 0xb2, 0x66, 0x7c, 0x13, 0xd0, 0x97, 0xe2, 0xd4, 0x29, 0x9e, - 0xa3, 0x87, 0xc1, 0x0e, 0x83, 0xbb, 0x3a, 0x15, 0x9e, 0x4b, 0x8e, 0x24, 0xc1, 0x5d, 0x22, 0x96, - 0xf0, 0xb3, 0x50, 0x20, 0xc1, 0x5d, 0xd4, 0x02, 0x08, 0x5d, 0xbf, 0x4f, 0x6f, 0xc6, 0x45, 0x58, - 0x9d, 0x18, 0x90, 0x03, 0x72, 0x8f, 0x55, 0x38, 0x6a, 0xde, 0x48, 0xaa, 0x7b, 0x05, 0xca, 0xaf, - 0x8e, 0x4d, 0x71, 0x35, 0x32, 0xe2, 0x92, 0xb5, 0xbe, 0x46, 0xe2, 0x36, 0x03, 0x09, 0x1c, 0x9d, - 0x82, 0x2a, 0x73, 0x6f, 0x0d, 0xe8, 0xb5, 0xc4, 0xcd, 0x25, 0x00, 0xbe, 0xca, 0xeb, 0xc7, 0x9b, - 0x46, 0x12, 0x95, 0x00, 0xd0, 0x63, 0xb0, 0x90, 0xdc, 0xf9, 0x7a, 0x48, 0xb7, 0xbc, 0x77, 0x84, - 0x86, 0xeb, 0x64, 0x1f, 0x1c, 0x9d, 0x86, 0x23, 0x09, 0x6c, 0x53, 0x24, 0x2b, 0xb6, 0x40, 0xcd, - 0x82, 0xb9, 0x6c, 0x04, 0xbb, 0x2f, 0xdc, 0x19, 0xbb, 0x03, 0xf1, 0xf8, 0xea, 0xc4, 0x80, 0xe0, - 0x3f, 0x5b, 0x70, 0x54, 0xaa, 0x9a, 0xb9, 0xec, 0x81, 0xb4, 0xfa, 0x5f, 0x59, 0x80, 0x4c, 0x0e, - 0x94, 0x69, 0x7d, 0xd5, 0xec, 0x25, 0xf1, 0x6c, 0xa8, 0x26, 0xca, 0x62, 0x09, 0x4a, 0xda, 0x41, - 0x18, 0x4a, 0x5d, 0xd9, 0x33, 0x13, 0xcd, 0x6f, 0x59, 0x77, 0x4b, 0x08, 0x51, 0xdf, 0xc8, 0x81, - 0xe2, 0xad, 0x1d, 0x46, 0x23, 0x55, 0x35, 0x8b, 0x76, 0x81, 0x00, 0x10, 0xf9, 0xc5, 0xcf, 0xa2, - 0x3e, 0x13, 0x56, 0x63, 0x27, 0x67, 0x29, 0x10, 0xd1, 0x03, 0xfc, 0xdb, 0x3c, 0xcc, 0xdd, 0x0c, - 0x06, 0xe3, 0x24, 0x30, 0x3e, 0x48, 0x01, 0x23, 0x55, 0xca, 0x17, 0x75, 0x29, 0x8f, 0xc0, 0x8e, - 0x18, 0x1d, 0x09, 0xcb, 0x2a, 0x10, 0x31, 0x46, 0x18, 0xea, 0xcc, 0x0d, 0xfb, 0x94, 0xc9, 0x02, - 0xa9, 0x59, 0x12, 0x99, 0x6b, 0x0a, 0x86, 0x96, 0xa1, 0xe6, 0xf6, 0xfb, 0x21, 0xed, 0xbb, 0x8c, - 0x76, 0x76, 0x9a, 0x65, 0x71, 0x98, 0x09, 0xc2, 0x6f, 0xc0, 0xbc, 0x16, 0x96, 0x52, 0xe9, 0x13, - 0x50, 0x7e, 0x5b, 0x40, 0xa6, 0xb4, 0xd6, 0x24, 0xaa, 0x72, 0x63, 0x1a, 0x2d, 0xfd, 0x13, 0x82, - 0xbe, 0x33, 0xbe, 0x0a, 0x25, 0x89, 0x8e, 0x4e, 0x99, 0x65, 0x8e, 0xcc, 0xf4, 0xf8, 0x5c, 0xd5, - 0x2c, 0x18, 0x4a, 0x92, 0x90, 0x52, 0xbc, 0xb0, 0x0d, 0x09, 0x21, 0xea, 0x1b, 0xff, 0xcb, 0x82, - 0x63, 0x6b, 0x94, 0xd1, 0x2e, 0xa3, 0xbd, 0xcb, 0x1e, 0x1d, 0xf4, 0xbe, 0xd4, 0x0a, 0x3c, 0xee, - 0xa3, 0x15, 0x8c, 0x3e, 0x1a, 0xf7, 0x3b, 0x03, 0xcf, 0xa7, 0x1b, 0x46, 0x23, 0x26, 0x01, 0x70, - 0x0f, 0xb1, 0xc5, 0x2f, 0x2e, 0x97, 0xe5, 0x6f, 0x36, 0x06, 0x24, 0xd6, 
0x70, 0x29, 0xd1, 0x30, - 0xfe, 0x81, 0x05, 0xc7, 0xb3, 0x5c, 0x2b, 0x25, 0xb5, 0xa1, 0x24, 0x36, 0x4f, 0x69, 0xe1, 0xa6, - 0x76, 0x10, 0x85, 0x86, 0x2e, 0xa4, 0xce, 0x17, 0xbf, 0xf5, 0x74, 0x9a, 0x7b, 0x13, 0xa7, 0x91, - 0x40, 0x8d, 0x2e, 0x81, 0x81, 0x8b, 0xff, 0xc0, 0x6b, 0x69, 0x93, 0xa6, 0xd0, 0x37, 0xb7, 0x2f, - 0xe5, 0x7b, 0xe5, 0x04, 0x7d, 0x0d, 0x6c, 0xb6, 0x33, 0x52, 0x2e, 0xb7, 0x73, 0xec, 0xb3, 0x89, - 0x73, 0x34, 0xb5, 0xed, 0xc6, 0xce, 0x88, 0x12, 0x81, 0xc2, 0xcd, 0xb2, 0xeb, 0x86, 0x3d, 0xcf, - 0x77, 0x07, 0x1e, 0x93, 0x62, 0xb4, 0x89, 0x09, 0x42, 0x4d, 0x28, 0x8f, 0xdc, 0x30, 0xd2, 0x79, - 0x53, 0x95, 0xe8, 0xa9, 0x68, 0x73, 0xdc, 0xa6, 0xac, 0xbb, 0x2d, 0xdd, 0xac, 0x6a, 0x73, 0x08, - 0x48, 0xaa, 0xcd, 0x21, 0x20, 0xf8, 0x17, 0x86, 0xe1, 0xc8, 0x37, 0x71, 0x48, 0xc3, 0xb1, 0x0e, - 0x6d, 0x38, 0xd6, 0x3d, 0x0c, 0x07, 0x7f, 0x27, 0xd1, 0xb2, 0xbe, 0xa2, 0xd2, 0xf2, 0xf3, 0x30, - 0xdf, 0x4b, 0xad, 0x1c, 0xac, 0x6d, 0xd9, 0xc2, 0xcd, 0xa0, 0xe3, 0xf5, 0x44, 0x75, 0x02, 0x72, - 0x80, 0xea, 0x32, 0xfa, 0xc8, 0xef, 0xd3, 0xc7, 0x63, 0x8f, 0x40, 0x35, 0xfe, 0x01, 0x0e, 0xd5, - 0xa0, 0x7c, 0xf9, 0x15, 0xf2, 0xfa, 0x25, 0xb2, 0xb6, 0x90, 0x43, 0x75, 0xa8, 0x74, 0x2e, 0xad, - 0xbe, 0x24, 0x66, 0xd6, 0xf9, 0xdf, 0x94, 0x74, 0x68, 0x0f, 0xd1, 0x37, 0xa1, 0x28, 0xe3, 0xf5, - 0xf1, 0xe4, 0xba, 0xe6, 0x6f, 0x53, 0x4b, 0x27, 0xf6, 0xc1, 0x25, 0xdf, 0x38, 0xf7, 0x84, 0x85, - 0xae, 0x41, 0x4d, 0x00, 0x55, 0xf7, 0xf7, 0x54, 0xb6, 0x09, 0x9b, 0xa2, 0xf4, 0xd0, 0x01, 0xab, - 0x06, 0xbd, 0x8b, 0x50, 0x94, 0x22, 0x38, 0x9e, 0x49, 0xab, 0xa6, 0xdc, 0x26, 0xd5, 0x0f, 0xc7, - 0x39, 0xf4, 0x0c, 0xd8, 0x37, 0x5c, 0x6f, 0x80, 0x8c, 0xac, 0xce, 0x68, 0xda, 0x2e, 0x1d, 0xcf, - 0x82, 0x8d, 0x63, 0x9f, 0x8b, 0x7b, 0xcf, 0x27, 0xb2, 0x0d, 0x30, 0xbd, 0xbd, 0xb9, 0x7f, 0x21, - 0x3e, 0xf9, 0x15, 0xd9, 0x21, 0xd5, 0x6d, 0x18, 0xf4, 0x50, 0xfa, 0xa8, 0x4c, 0xd7, 0x66, 0xa9, - 0x75, 0xd0, 0x72, 0x4c, 0x70, 0x03, 0x6a, 0x46, 0x0b, 0xc4, 0x14, 0xeb, 0xfe, 0xfe, 0x8d, 0x29, - 0xd6, 0x29, 0x7d, 0x13, 0x9c, 0x43, 0xeb, 0x50, 0xe1, 0xb9, 0xb0, 0xf8, 0xa9, 0xe4, 0x64, 0x36, - 0xe5, 0x35, 0x52, 0x9d, 0xa5, 0x53, 0xd3, 0x17, 0x63, 0x42, 0xdf, 0x86, 0xea, 0x3a, 0x65, 0x2a, - 0x5e, 0x9c, 0xc8, 0x06, 0x9c, 0x29, 0x92, 0x4a, 0x07, 0x2d, 0x9c, 0x43, 0x6f, 0x88, 0xb4, 0x3c, - 0xed, 0x2e, 0x91, 0x73, 0x80, 0x5b, 0x8c, 0xef, 0xb5, 0x7c, 0x30, 0x42, 0x4c, 0xf9, 0xf5, 0x14, - 0x65, 0x15, 0x59, 0x9d, 0x03, 0x9e, 0x60, 0x4c, 0xd9, 0xb9, 0xc7, 0x1f, 0x29, 0x70, 0xee, 0xfc, - 0x9b, 0xfa, 0xbf, 0x04, 0x6b, 0x2e, 0x73, 0xd1, 0x2b, 0x30, 0x2f, 0x64, 0x19, 0xff, 0xd9, 0x20, - 0x65, 0xf3, 0xfb, 0xfe, 0xd9, 0x90, 0xb2, 0xf9, 0xfd, 0xff, 0x70, 0xc0, 0xb9, 0xce, 0x9b, 0x1f, - 0x7c, 0xdc, 0xca, 0x7d, 0xf8, 0x71, 0x2b, 0xf7, 0xe9, 0xc7, 0x2d, 0xeb, 0xfb, 0xbb, 0x2d, 0xeb, - 0xd7, 0xbb, 0x2d, 0xeb, 0xfd, 0xdd, 0x96, 0xf5, 0xc1, 0x6e, 0xcb, 0xfa, 0xc7, 0x6e, 0xcb, 0xfa, - 0xe7, 0x6e, 0x2b, 0xf7, 0xe9, 0x6e, 0xcb, 0x7a, 0xf7, 0x93, 0x56, 0xee, 0x83, 0x4f, 0x5a, 0xb9, - 0x0f, 0x3f, 0x69, 0xe5, 0xbe, 0xfb, 0xe8, 0xbd, 0x4b, 0x50, 0xe9, 0xe8, 0x4a, 0xe2, 0xeb, 0xc9, - 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x04, 0x88, 0x1d, 0xe6, 0xf1, 0x22, 0x00, 0x00, + // 2671 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x3a, 0x4d, 0x6c, 0x1b, 0xc7, + 0xd5, 0x5c, 0x72, 0xf9, 0xf7, 0x48, 0xc9, 0xf2, 0x88, 0xb6, 0x09, 0xd9, 0xe1, 0x2a, 0x83, 0xef, + 0x4b, 0xdc, 0xd8, 0x11, 0x63, 0xa7, 0x49, 0x1d, 0xa7, 0x69, 0x6a, 0x4a, 0xb1, 0x63, 0x47, 0x71, + 0x9c, 0x91, 0xe3, 0xa4, 0x45, 0x83, 0x60, 0x4d, 0x8e, 0xa8, 
0x85, 0xc9, 0x5d, 0x7a, 0x77, 0x18, + 0x47, 0xb7, 0x02, 0x3d, 0x17, 0x0d, 0xd0, 0x43, 0xdb, 0x4b, 0x81, 0x02, 0x05, 0x5a, 0xb4, 0xc8, + 0xa5, 0xe8, 0xa9, 0x28, 0xda, 0x4b, 0x0f, 0xe9, 0x2d, 0xbd, 0x05, 0x39, 0xb0, 0x8d, 0x72, 0x29, + 0x74, 0x0a, 0x50, 0xa0, 0x87, 0x9c, 0x8a, 0xf9, 0xdb, 0x9d, 0x5d, 0x91, 0x75, 0xe8, 0xb8, 0x48, + 0x7c, 0x21, 0x67, 0xde, 0xbc, 0x79, 0x33, 0xef, 0x67, 0xde, 0x1f, 0x09, 0xc7, 0x47, 0xb7, 0xfa, + 0xed, 0x41, 0xd0, 0x1f, 0x85, 0x01, 0x0b, 0xe2, 0xc1, 0x9a, 0xf8, 0x44, 0x15, 0x3d, 0x5f, 0x69, + 0xf4, 0x83, 0x7e, 0x20, 0x71, 0xf8, 0x48, 0xae, 0xaf, 0x38, 0xfd, 0x20, 0xe8, 0x0f, 0x68, 0x5b, + 0xcc, 0x6e, 0x8e, 0xb7, 0xdb, 0xcc, 0x1b, 0xd2, 0x88, 0xb9, 0xc3, 0x91, 0x42, 0x58, 0x55, 0xd4, + 0x6f, 0x0f, 0x86, 0x41, 0x8f, 0x0e, 0xda, 0x11, 0x73, 0x59, 0x24, 0x3f, 0x15, 0xc6, 0x32, 0xc7, + 0x18, 0x8d, 0xa3, 0x1d, 0xf1, 0x21, 0x81, 0xf8, 0xf7, 0x16, 0x1c, 0xd9, 0x74, 0x6f, 0xd2, 0xc1, + 0xf5, 0xe0, 0x86, 0x3b, 0x18, 0xd3, 0x88, 0xd0, 0x68, 0x14, 0xf8, 0x11, 0x45, 0xeb, 0x50, 0x1a, + 0xf0, 0x85, 0xa8, 0x69, 0xad, 0x16, 0x4e, 0xd6, 0xce, 0x9e, 0x5a, 0x8b, 0xaf, 0x3c, 0x75, 0x83, + 0x84, 0x46, 0x2f, 0xf8, 0x2c, 0xdc, 0x25, 0x6a, 0xeb, 0xca, 0x0d, 0xa8, 0x19, 0x60, 0xb4, 0x04, + 0x85, 0x5b, 0x74, 0xb7, 0x69, 0xad, 0x5a, 0x27, 0xab, 0x84, 0x0f, 0xd1, 0x19, 0x28, 0xbe, 0xcd, + 0xc9, 0x34, 0xf3, 0xab, 0xd6, 0xc9, 0xda, 0xd9, 0xe3, 0xc9, 0x21, 0xaf, 0xf9, 0xde, 0xed, 0x31, + 0x15, 0xbb, 0xd5, 0x41, 0x12, 0xf3, 0x7c, 0xfe, 0x9c, 0x85, 0x4f, 0xc1, 0xe1, 0x03, 0xeb, 0xe8, + 0x28, 0x94, 0x04, 0x86, 0xbc, 0x71, 0x95, 0xa8, 0x19, 0x6e, 0x00, 0xda, 0x62, 0x21, 0x75, 0x87, + 0xc4, 0x65, 0xfc, 0xbe, 0xb7, 0xc7, 0x34, 0x62, 0xf8, 0x65, 0x58, 0x4e, 0x41, 0x15, 0xdb, 0x4f, + 0x43, 0x2d, 0x4a, 0xc0, 0x8a, 0xf7, 0x46, 0x72, 0xad, 0x64, 0x0f, 0x31, 0x11, 0xf1, 0xcf, 0x2d, + 0x80, 0x64, 0x0d, 0xb5, 0x00, 0xe4, 0xea, 0x8b, 0x6e, 0xb4, 0x23, 0x18, 0xb6, 0x89, 0x01, 0x41, + 0xa7, 0xe1, 0x70, 0x32, 0xbb, 0x1a, 0x6c, 0xed, 0xb8, 0x61, 0x4f, 0xc8, 0xc0, 0x26, 0x07, 0x17, + 0x10, 0x02, 0x3b, 0x74, 0x19, 0x6d, 0x16, 0x56, 0xad, 0x93, 0x05, 0x22, 0xc6, 0x9c, 0x5b, 0x46, + 0x7d, 0xd7, 0x67, 0x4d, 0x5b, 0x88, 0x53, 0xcd, 0x38, 0x9c, 0xeb, 0x97, 0x46, 0xcd, 0xe2, 0xaa, + 0x75, 0x72, 0x81, 0xa8, 0x19, 0xfe, 0x77, 0x01, 0xea, 0xaf, 0x8e, 0x69, 0xb8, 0xab, 0x04, 0x80, + 0x5a, 0x50, 0x89, 0xe8, 0x80, 0x76, 0x59, 0x10, 0x4a, 0x8d, 0x74, 0xf2, 0x4d, 0x8b, 0xc4, 0x30, + 0xd4, 0x80, 0xe2, 0xc0, 0x1b, 0x7a, 0x4c, 0x5c, 0x6b, 0x81, 0xc8, 0x09, 0x3a, 0x0f, 0xc5, 0x88, + 0xb9, 0x21, 0x13, 0x77, 0xa9, 0x9d, 0x5d, 0x59, 0x93, 0x86, 0xb9, 0xa6, 0x0d, 0x73, 0xed, 0xba, + 0x36, 0xcc, 0x4e, 0xe5, 0xfd, 0x89, 0x93, 0x7b, 0xf7, 0xef, 0x8e, 0x45, 0xe4, 0x16, 0xf4, 0x34, + 0x14, 0xa8, 0xdf, 0x13, 0xf7, 0xfd, 0xbc, 0x3b, 0xf9, 0x06, 0x74, 0x06, 0xaa, 0x3d, 0x2f, 0xa4, + 0x5d, 0xe6, 0x05, 0xbe, 0xe0, 0x6a, 0xf1, 0xec, 0x72, 0xa2, 0x91, 0x0d, 0xbd, 0x44, 0x12, 0x2c, + 0x74, 0x1a, 0x4a, 0x11, 0x17, 0x5d, 0xd4, 0x2c, 0x73, 0x5b, 0xe8, 0x34, 0xf6, 0x27, 0xce, 0x92, + 0x84, 0x9c, 0x0e, 0x86, 0x1e, 0xa3, 0xc3, 0x11, 0xdb, 0x25, 0x0a, 0x07, 0x3d, 0x06, 0xe5, 0x1e, + 0x1d, 0x50, 0xae, 0xf0, 0x8a, 0x50, 0xf8, 0x92, 0x41, 0x5e, 0x2c, 0x10, 0x8d, 0x80, 0xde, 0x04, + 0x7b, 0x34, 0x70, 0xfd, 0x66, 0x55, 0x70, 0xb1, 0x98, 0x20, 0x5e, 0x1b, 0xb8, 0x7e, 0xe7, 0x99, + 0x8f, 0x26, 0xce, 0x53, 0x7d, 0x8f, 0xed, 0x8c, 0x6f, 0xae, 0x75, 0x83, 0x61, 0xbb, 0x1f, 0xba, + 0xdb, 0xae, 0xef, 0xb6, 0x07, 0xc1, 0x2d, 0xaf, 0xfd, 0xf6, 0x93, 0x6d, 0xfe, 0x06, 0x6f, 0x8f, + 0x69, 0xe8, 0xd1, 0xb0, 0xcd, 0xc9, 0xac, 0x09, 0x95, 0xf0, 0xad, 0x44, 0x90, 0x45, 
0x57, 0xb8, + 0xfd, 0x05, 0x21, 0x5d, 0xdf, 0x19, 0xfb, 0xb7, 0xa2, 0x26, 0x88, 0x53, 0x8e, 0x25, 0xa7, 0x08, + 0x38, 0xa1, 0xdb, 0x97, 0xc2, 0x60, 0x3c, 0xea, 0x1c, 0xda, 0x9f, 0x38, 0x26, 0x3e, 0x31, 0x27, + 0x57, 0xec, 0x4a, 0x69, 0xa9, 0x8c, 0xdf, 0x2b, 0x00, 0xda, 0x72, 0x87, 0xa3, 0x01, 0x9d, 0x4b, + 0xfd, 0xb1, 0xa2, 0xf3, 0xf7, 0xac, 0xe8, 0xc2, 0xbc, 0x8a, 0x4e, 0xb4, 0x66, 0xcf, 0xa7, 0xb5, + 0xe2, 0xe7, 0xd5, 0x5a, 0xe9, 0x2b, 0xaf, 0x35, 0xdc, 0x04, 0x9b, 0x53, 0xe6, 0xce, 0x32, 0x74, + 0xef, 0x08, 0xdd, 0xd4, 0x09, 0x1f, 0xe2, 0x4d, 0x28, 0x49, 0xbe, 0xd0, 0x4a, 0x56, 0x79, 0xe9, + 0x77, 0x9b, 0x28, 0xae, 0xa0, 0x55, 0xb2, 0x94, 0xa8, 0xa4, 0x20, 0x84, 0x8d, 0xff, 0x68, 0xc1, + 0x82, 0xb2, 0x08, 0xe5, 0xfb, 0x6e, 0x42, 0x59, 0xfa, 0x1e, 0xed, 0xf7, 0x8e, 0x65, 0xfd, 0xde, + 0x85, 0x9e, 0x3b, 0x62, 0x34, 0xec, 0xb4, 0xdf, 0x9f, 0x38, 0xd6, 0x47, 0x13, 0xe7, 0xd1, 0x59, + 0x42, 0xd3, 0xb1, 0x46, 0xfb, 0x4b, 0x4d, 0x18, 0x9d, 0x12, 0xb7, 0x63, 0x91, 0x32, 0xab, 0x43, + 0x6b, 0x32, 0x44, 0x5d, 0xf6, 0xfb, 0x34, 0xe2, 0x94, 0x6d, 0x6e, 0x11, 0x44, 0xe2, 0x70, 0x36, + 0xef, 0xb8, 0xa1, 0xef, 0xf9, 0xfd, 0xa8, 0x59, 0x10, 0x3e, 0x3d, 0x9e, 0xe3, 0x9f, 0x5a, 0xb0, + 0x9c, 0x32, 0x6b, 0xc5, 0xc4, 0x39, 0x28, 0x45, 0x5c, 0x53, 0x9a, 0x07, 0xc3, 0x28, 0xb6, 0x04, + 0xbc, 0xb3, 0xa8, 0x2e, 0x5f, 0x92, 0x73, 0xa2, 0xf0, 0xef, 0xdf, 0xd5, 0xfe, 0x62, 0x41, 0x5d, + 0x04, 0x26, 0xfd, 0xd6, 0x10, 0xd8, 0xbe, 0x3b, 0xa4, 0x4a, 0x55, 0x62, 0x6c, 0x44, 0x2b, 0x7e, + 0x5c, 0x45, 0x47, 0xab, 0x79, 0x1d, 0xac, 0x75, 0xcf, 0x0e, 0xd6, 0x4a, 0xde, 0x5d, 0x03, 0x8a, + 0xdc, 0xbc, 0x77, 0x85, 0x73, 0xad, 0x12, 0x39, 0xc1, 0x8f, 0xc2, 0x82, 0xe2, 0x42, 0x89, 0x76, + 0x56, 0x80, 0x1d, 0x42, 0x49, 0x6a, 0x02, 0xfd, 0x1f, 0x54, 0xe3, 0xc4, 0x44, 0x70, 0x5b, 0xe8, + 0x94, 0xf6, 0x27, 0x4e, 0x9e, 0x45, 0x24, 0x59, 0x40, 0x8e, 0x19, 0xf4, 0xad, 0x4e, 0x75, 0x7f, + 0xe2, 0x48, 0x80, 0x0a, 0xf1, 0xe8, 0x04, 0xd8, 0x3b, 0x3c, 0x6e, 0x72, 0x11, 0xd8, 0x9d, 0xca, + 0xfe, 0xc4, 0x11, 0x73, 0x22, 0x3e, 0xf1, 0x25, 0xa8, 0x6f, 0xd2, 0xbe, 0xdb, 0xdd, 0x55, 0x87, + 0x36, 0x34, 0x39, 0x7e, 0xa0, 0xa5, 0x69, 0x3c, 0x0c, 0xf5, 0xf8, 0xc4, 0xb7, 0x86, 0x91, 0x7a, + 0x0d, 0xb5, 0x18, 0xf6, 0x72, 0x84, 0x7f, 0x66, 0x81, 0xb2, 0x01, 0x84, 0x8d, 0x6c, 0x87, 0xfb, + 0x42, 0xd8, 0x9f, 0x38, 0x0a, 0xa2, 0x93, 0x19, 0xf4, 0x2c, 0x94, 0x23, 0x71, 0x22, 0x27, 0x96, + 0x35, 0x2d, 0xb1, 0xd0, 0x39, 0xc4, 0x4d, 0x64, 0x7f, 0xe2, 0x68, 0x44, 0xa2, 0x07, 0x68, 0x2d, + 0x95, 0x10, 0x48, 0xc6, 0x16, 0xf7, 0x27, 0x8e, 0x01, 0x35, 0x13, 0x04, 0xfc, 0x99, 0x05, 0xb5, + 0xeb, 0xae, 0x17, 0x9b, 0x50, 0x53, 0xab, 0x28, 0xf1, 0xd5, 0x12, 0xc0, 0x2d, 0xb1, 0x47, 0x07, + 0xee, 0xee, 0xc5, 0x20, 0x14, 0x74, 0x17, 0x48, 0x3c, 0x4f, 0x62, 0xb8, 0x3d, 0x35, 0x86, 0x17, + 0xe7, 0x77, 0xed, 0xff, 0x5b, 0x47, 0x7a, 0xc5, 0xae, 0xe4, 0x97, 0x0a, 0xf8, 0x3d, 0x0b, 0xea, + 0x92, 0x79, 0x65, 0x79, 0xdf, 0x83, 0x92, 0x94, 0x8d, 0x60, 0xff, 0xbf, 0x38, 0xa6, 0x53, 0xf3, + 0x38, 0x25, 0x45, 0x13, 0x3d, 0x0f, 0x8b, 0xbd, 0x30, 0x18, 0x8d, 0x68, 0x6f, 0x4b, 0xb9, 0xbf, + 0x7c, 0xd6, 0xfd, 0x6d, 0x98, 0xeb, 0x24, 0x83, 0x8e, 0xff, 0x6a, 0xc1, 0x82, 0x72, 0x26, 0x4a, + 0x5d, 0xb1, 0x88, 0xad, 0x7b, 0x8e, 0x9e, 0xf9, 0x79, 0xa3, 0xe7, 0x51, 0x28, 0xf5, 0x79, 0x7c, + 0xd1, 0x0e, 0x49, 0xcd, 0xe6, 0x8b, 0xaa, 0xf8, 0x0a, 0x2c, 0x6a, 0x56, 0x66, 0x78, 0xd4, 0x95, + 0xac, 0x47, 0xbd, 0xdc, 0xa3, 0x3e, 0xf3, 0xb6, 0xbd, 0xd8, 0x47, 0x2a, 0x7c, 0xfc, 0x23, 0x0b, + 0x96, 0xb2, 0x28, 0x68, 0x23, 0x53, 0x58, 0x3c, 0x32, 0x9b, 0x9c, 0x59, 0x53, 0x68, 0xd2, 0xaa, + 0xb2, 0x78, 
0xea, 0x6e, 0x95, 0x45, 0xc3, 0x74, 0x32, 0x55, 0xe5, 0x15, 0xf0, 0x4f, 0x2c, 0x58, + 0x48, 0xe9, 0x12, 0x9d, 0x03, 0x7b, 0x3b, 0x0c, 0x86, 0x73, 0x29, 0x4a, 0xec, 0x40, 0x5f, 0x87, + 0x3c, 0x0b, 0xe6, 0x52, 0x53, 0x9e, 0x05, 0x5c, 0x4b, 0x8a, 0xfd, 0x82, 0xcc, 0xdb, 0xe5, 0x0c, + 0x3f, 0x05, 0x55, 0xc1, 0xd0, 0x35, 0xd7, 0x0b, 0xa7, 0x06, 0x8c, 0xe9, 0x0c, 0x3d, 0x0b, 0x87, + 0xa4, 0x33, 0x9c, 0xbe, 0xb9, 0x3e, 0x6d, 0x73, 0x5d, 0x6f, 0x3e, 0x0e, 0x45, 0x91, 0x74, 0xf0, + 0x2d, 0x3d, 0x97, 0xb9, 0x7a, 0x0b, 0x1f, 0xe3, 0x23, 0xb0, 0xcc, 0xdf, 0x20, 0x0d, 0xa3, 0xf5, + 0x60, 0xec, 0x33, 0x5d, 0x37, 0x9d, 0x86, 0x46, 0x1a, 0xac, 0xac, 0xa4, 0x01, 0xc5, 0x2e, 0x07, + 0x08, 0x1a, 0x0b, 0x44, 0x4e, 0xf0, 0x2f, 0x2d, 0x40, 0x97, 0x28, 0x13, 0xa7, 0x5c, 0xde, 0x88, + 0x9f, 0xc7, 0x0a, 0x54, 0x86, 0x2e, 0xeb, 0xee, 0xd0, 0x30, 0xd2, 0xf9, 0x8b, 0x9e, 0x7f, 0x19, + 0x89, 0x27, 0x3e, 0x03, 0xcb, 0xa9, 0x5b, 0x2a, 0x9e, 0x56, 0xa0, 0xd2, 0x55, 0x30, 0x15, 0xf2, + 0xe2, 0x39, 0xfe, 0x5d, 0x1e, 0x2a, 0x3a, 0xad, 0x43, 0x67, 0xa0, 0xb6, 0xed, 0xf9, 0x7d, 0x1a, + 0x8e, 0x42, 0x4f, 0x89, 0xc0, 0x96, 0x69, 0x9e, 0x01, 0x26, 0xe6, 0x04, 0x3d, 0x0e, 0xe5, 0x71, + 0x44, 0xc3, 0xb7, 0x3c, 0xf9, 0xd2, 0xab, 0x9d, 0xc6, 0xde, 0xc4, 0x29, 0xbd, 0x16, 0xd1, 0xf0, + 0xf2, 0x06, 0x0f, 0x3e, 0x63, 0x31, 0x22, 0xf2, 0xbb, 0x87, 0x5e, 0x52, 0x66, 0x2a, 0x12, 0xb8, + 0xce, 0x37, 0xf8, 0xf5, 0x33, 0xae, 0x6e, 0x14, 0x06, 0x43, 0xca, 0x76, 0xe8, 0x38, 0x6a, 0x77, + 0x83, 0xe1, 0x30, 0xf0, 0xdb, 0xa2, 0x13, 0x20, 0x98, 0xe6, 0x11, 0x94, 0x6f, 0x57, 0x96, 0x7b, + 0x1d, 0xca, 0x6c, 0x27, 0x0c, 0xc6, 0xfd, 0x1d, 0x11, 0x18, 0x0a, 0x9d, 0xf3, 0xf3, 0xd3, 0xd3, + 0x14, 0x88, 0x1e, 0xa0, 0x87, 0xb9, 0xb4, 0x68, 0xf7, 0x56, 0x34, 0x1e, 0xca, 0xda, 0xb3, 0x53, + 0xdc, 0x9f, 0x38, 0xd6, 0xe3, 0x24, 0x06, 0xe3, 0x0b, 0xb0, 0x90, 0x4a, 0x85, 0xd1, 0x13, 0x60, + 0x87, 0x74, 0x5b, 0xbb, 0x02, 0x74, 0x30, 0x63, 0x96, 0xd1, 0x9f, 0xe3, 0x10, 0xf1, 0x89, 0x7f, + 0x98, 0x07, 0xc7, 0xa8, 0xfa, 0x2f, 0x06, 0xe1, 0xcb, 0x94, 0x85, 0x5e, 0xf7, 0xaa, 0x3b, 0xa4, + 0xda, 0xbc, 0x1c, 0xa8, 0x0d, 0x05, 0xf0, 0x2d, 0xe3, 0x15, 0xc1, 0x30, 0xc6, 0x43, 0x0f, 0x01, + 0x88, 0x67, 0x27, 0xd7, 0xe5, 0x83, 0xaa, 0x0a, 0x88, 0x58, 0x5e, 0x4f, 0x09, 0xbb, 0x3d, 0xa7, + 0x70, 0x94, 0x90, 0x2f, 0x67, 0x85, 0x3c, 0x37, 0x9d, 0x58, 0xb2, 0xe6, 0x73, 0x29, 0xa6, 0x9f, + 0x0b, 0xfe, 0x9b, 0x05, 0xad, 0x4d, 0x7d, 0xf3, 0x7b, 0x14, 0x87, 0xe6, 0x37, 0x7f, 0x9f, 0xf8, + 0x2d, 0x7c, 0x31, 0x7e, 0x71, 0x0b, 0x60, 0xd3, 0xf3, 0xe9, 0x45, 0x6f, 0xc0, 0x68, 0x38, 0xa5, + 0x10, 0xfa, 0x71, 0x21, 0xf1, 0x2a, 0x84, 0x6e, 0x6b, 0x3e, 0xd7, 0x0d, 0x57, 0x7e, 0x3f, 0xd8, + 0xc8, 0xdf, 0x47, 0xb5, 0x15, 0x32, 0x5e, 0xce, 0x87, 0xf2, 0xb6, 0x60, 0x4f, 0x46, 0xe5, 0x54, + 0x8f, 0x29, 0xe1, 0xbd, 0xf3, 0x2d, 0x75, 0xf8, 0xd3, 0x77, 0x49, 0xaa, 0x44, 0xe7, 0xaf, 0x1d, + 0xed, 0xfa, 0xcc, 0x7d, 0xc7, 0xd8, 0x4f, 0xf4, 0x21, 0xc8, 0x55, 0x79, 0x5b, 0x71, 0x6a, 0xde, + 0xf6, 0x9c, 0x3a, 0xe6, 0x8b, 0xe4, 0x6e, 0xf8, 0xb9, 0xc4, 0x89, 0x0a, 0xa5, 0x28, 0x27, 0xfa, + 0xc8, 0xdd, 0x9e, 0xb8, 0x7a, 0xd8, 0x7f, 0xb2, 0x60, 0xe9, 0x12, 0x65, 0xe9, 0x3c, 0xea, 0x01, + 0x52, 0x29, 0x7e, 0x11, 0x0e, 0x1b, 0xf7, 0x57, 0xdc, 0x3f, 0x99, 0x49, 0x9e, 0x8e, 0x24, 0xfc, + 0x5f, 0xf6, 0x7b, 0xf4, 0x1d, 0x55, 0x93, 0xa6, 0xf3, 0xa6, 0x6b, 0x50, 0x33, 0x16, 0xd1, 0x85, + 0x4c, 0xc6, 0xb4, 0x9c, 0x69, 0xc5, 0xf2, 0xa8, 0xdf, 0x69, 0x28, 0x9e, 0x64, 0xe5, 0xa9, 0xf2, + 0xe1, 0x38, 0xbb, 0xd8, 0x02, 0x24, 0xd4, 0x25, 0xc8, 0x9a, 0xf1, 0x4d, 0x40, 0x5f, 0x8a, 0x53, + 0xa7, 0x78, 0x8e, 0x1e, 0x06, 0x3b, 
0x0c, 0xee, 0xe8, 0x54, 0x78, 0x21, 0x39, 0x92, 0x04, 0x77, + 0x88, 0x58, 0xc2, 0xcf, 0x42, 0x81, 0x04, 0x77, 0x50, 0x0b, 0x20, 0x74, 0xfd, 0x3e, 0xbd, 0x11, + 0x17, 0x61, 0x75, 0x62, 0x40, 0x66, 0xe4, 0x1e, 0xeb, 0x70, 0xd8, 0xbc, 0x91, 0x54, 0xf7, 0x1a, + 0x94, 0x5f, 0x1d, 0x9b, 0xe2, 0x6a, 0x64, 0xc4, 0x25, 0x6b, 0x7d, 0x8d, 0xc4, 0x6d, 0x06, 0x12, + 0x38, 0x3a, 0x01, 0x55, 0xe6, 0xde, 0x1c, 0xd0, 0xab, 0x89, 0x9b, 0x4b, 0x00, 0x7c, 0x95, 0xd7, + 0x8f, 0x37, 0x8c, 0x24, 0x2a, 0x01, 0xa0, 0xc7, 0x60, 0x29, 0xb9, 0xf3, 0xb5, 0x90, 0x6e, 0x7b, + 0xef, 0x08, 0x0d, 0xd7, 0xc9, 0x01, 0x38, 0x3a, 0x09, 0x87, 0x12, 0xd8, 0x96, 0x48, 0x56, 0x6c, + 0x81, 0x9a, 0x05, 0x73, 0xd9, 0x08, 0x76, 0x5f, 0xb8, 0x3d, 0x76, 0x07, 0xe2, 0xf1, 0xd5, 0x89, + 0x01, 0xc1, 0x7f, 0xb6, 0xe0, 0xb0, 0x54, 0x35, 0x73, 0xd9, 0x03, 0x69, 0xf5, 0xbf, 0xb2, 0x00, + 0x99, 0x1c, 0x28, 0xd3, 0xfa, 0x7f, 0xb3, 0x97, 0xc4, 0xb3, 0xa1, 0x9a, 0x28, 0x8b, 0x25, 0x28, + 0x69, 0x07, 0x61, 0x28, 0x75, 0x65, 0xcf, 0x4c, 0x34, 0xbf, 0x65, 0xdd, 0x2d, 0x21, 0x44, 0x7d, + 0x23, 0x07, 0x8a, 0x37, 0x77, 0x19, 0x8d, 0x54, 0xd5, 0x2c, 0xda, 0x05, 0x02, 0x40, 0xe4, 0x17, + 0x3f, 0x8b, 0xfa, 0x4c, 0x58, 0x8d, 0x9d, 0x9c, 0xa5, 0x40, 0x44, 0x0f, 0xf0, 0x6f, 0xf3, 0xb0, + 0x70, 0x23, 0x18, 0x8c, 0x93, 0xc0, 0xf8, 0x20, 0x05, 0x8c, 0x54, 0x29, 0x5f, 0xd4, 0xa5, 0x3c, + 0x02, 0x3b, 0x62, 0x74, 0x24, 0x2c, 0xab, 0x40, 0xc4, 0x18, 0x61, 0xa8, 0x33, 0x37, 0xec, 0x53, + 0x26, 0x0b, 0xa4, 0x66, 0x49, 0x64, 0xae, 0x29, 0x18, 0x5a, 0x85, 0x9a, 0xdb, 0xef, 0x87, 0xb4, + 0xef, 0x32, 0xda, 0xd9, 0x6d, 0x96, 0xc5, 0x61, 0x26, 0x08, 0xbf, 0x01, 0x8b, 0x5a, 0x58, 0x4a, + 0xa5, 0x4f, 0x40, 0xf9, 0x6d, 0x01, 0x99, 0xd2, 0x5a, 0x93, 0xa8, 0xca, 0x8d, 0x69, 0xb4, 0xf4, + 0x4f, 0x08, 0xfa, 0xce, 0xf8, 0x0a, 0x94, 0x24, 0x3a, 0x3a, 0x61, 0x96, 0x39, 0x32, 0xd3, 0xe3, + 0x73, 0x55, 0xb3, 0x60, 0x28, 0x49, 0x42, 0x4a, 0xf1, 0xc2, 0x36, 0x24, 0x84, 0xa8, 0x6f, 0xfc, + 0x2f, 0x0b, 0x8e, 0x6c, 0x50, 0x46, 0xbb, 0x8c, 0xf6, 0x2e, 0x7a, 0x74, 0xd0, 0xfb, 0x52, 0x2b, + 0xf0, 0xb8, 0x8f, 0x56, 0x30, 0xfa, 0x68, 0xdc, 0xef, 0x0c, 0x3c, 0x9f, 0x6e, 0x1a, 0x8d, 0x98, + 0x04, 0xc0, 0x3d, 0xc4, 0x36, 0xbf, 0xb8, 0x5c, 0x96, 0xbf, 0xd9, 0x18, 0x90, 0x58, 0xc3, 0xa5, + 0x44, 0xc3, 0xf8, 0x07, 0x16, 0x1c, 0xcd, 0x72, 0xad, 0x94, 0xd4, 0x86, 0x92, 0xd8, 0x3c, 0xa5, + 0x85, 0x9b, 0xda, 0x41, 0x14, 0x1a, 0x3a, 0x97, 0x3a, 0x5f, 0xfc, 0xd6, 0xd3, 0x69, 0xee, 0x4f, + 0x9c, 0x46, 0x02, 0x35, 0xba, 0x04, 0x06, 0x2e, 0xfe, 0x03, 0xaf, 0xa5, 0x4d, 0x9a, 0x42, 0xdf, + 0xdc, 0xbe, 0x94, 0xef, 0x95, 0x13, 0xf4, 0x35, 0xb0, 0xd9, 0xee, 0x48, 0xb9, 0xdc, 0xce, 0x91, + 0xcf, 0x26, 0xce, 0xe1, 0xd4, 0xb6, 0xeb, 0xbb, 0x23, 0x4a, 0x04, 0x0a, 0x37, 0xcb, 0xae, 0x1b, + 0xf6, 0x3c, 0xdf, 0x1d, 0x78, 0x4c, 0x8a, 0xd1, 0x26, 0x26, 0x08, 0x35, 0xa1, 0x3c, 0x72, 0xc3, + 0x48, 0xe7, 0x4d, 0x55, 0xa2, 0xa7, 0xa2, 0xcd, 0x71, 0x8b, 0xb2, 0xee, 0x8e, 0x74, 0xb3, 0xaa, + 0xcd, 0x21, 0x20, 0xa9, 0x36, 0x87, 0x80, 0xe0, 0x5f, 0x18, 0x86, 0x23, 0xdf, 0xc4, 0x57, 0xce, + 0x70, 0xf0, 0x77, 0x12, 0x2d, 0xeb, 0x2b, 0x2a, 0x2d, 0x3f, 0x0f, 0x8b, 0xbd, 0xd4, 0xca, 0x6c, + 0x6d, 0xcb, 0x16, 0x6e, 0x06, 0x1d, 0x8f, 0x13, 0xd5, 0x09, 0xc8, 0x0c, 0xd5, 0x65, 0xf4, 0x91, + 0x3f, 0xa8, 0x8f, 0x44, 0xea, 0x85, 0xbb, 0x4b, 0xfd, 0xb1, 0x47, 0xa0, 0x1a, 0xff, 0x5c, 0x87, + 0x6a, 0x50, 0xbe, 0xf8, 0x0a, 0x79, 0xfd, 0x02, 0xd9, 0x58, 0xca, 0xa1, 0x3a, 0x54, 0x3a, 0x17, + 0xd6, 0x5f, 0x12, 0x33, 0xeb, 0xec, 0x6f, 0x4a, 0x3a, 0x11, 0x08, 0xd1, 0x37, 0xa1, 0x28, 0xa3, + 0xfb, 0xd1, 0x84, 0x39, 0xf3, 0x97, 0xac, 0x95, 0x63, 0x07, 
0xe0, 0x52, 0x4a, 0x38, 0xf7, 0x84, + 0x85, 0xae, 0x42, 0x4d, 0x00, 0x55, 0xaf, 0xf8, 0x44, 0xb6, 0x65, 0x9b, 0xa2, 0xf4, 0xd0, 0x8c, + 0x55, 0x83, 0xde, 0x79, 0x28, 0x4a, 0x81, 0x1d, 0xcd, 0x24, 0x61, 0x53, 0x6e, 0x93, 0xea, 0x9e, + 0xe3, 0x1c, 0x7a, 0x06, 0xec, 0xeb, 0xae, 0x37, 0x40, 0x46, 0x0e, 0x68, 0xb4, 0x78, 0x57, 0x8e, + 0x66, 0xc1, 0xc6, 0xb1, 0xcf, 0xc5, 0x9d, 0xea, 0x63, 0xd9, 0x76, 0x99, 0xde, 0xde, 0x3c, 0xb8, + 0x10, 0x9f, 0xfc, 0x8a, 0xec, 0xa7, 0xea, 0xa6, 0x0d, 0x7a, 0x28, 0x7d, 0x54, 0xa6, 0xc7, 0xb3, + 0xd2, 0x9a, 0xb5, 0x1c, 0x13, 0xdc, 0x84, 0x9a, 0xd1, 0x30, 0x31, 0xc5, 0x7a, 0xb0, 0xdb, 0x63, + 0x8a, 0x75, 0x4a, 0x97, 0x05, 0xe7, 0xd0, 0x25, 0xa8, 0xf0, 0xcc, 0x59, 0xfc, 0xb0, 0x72, 0x3c, + 0x9b, 0x20, 0x1b, 0x89, 0xd1, 0xca, 0x89, 0xe9, 0x8b, 0x31, 0xa1, 0x6f, 0x43, 0xf5, 0x12, 0x65, + 0x2a, 0xba, 0x1c, 0xcb, 0x86, 0xa7, 0x29, 0x92, 0x4a, 0x87, 0x38, 0x9c, 0x43, 0x6f, 0x88, 0x24, + 0x3e, 0xed, 0x5c, 0x91, 0x33, 0xc3, 0x89, 0xc6, 0xf7, 0x5a, 0x9d, 0x8d, 0x10, 0x53, 0x7e, 0x3d, + 0x45, 0x59, 0xc5, 0x61, 0x67, 0xc6, 0x83, 0x8d, 0x29, 0x3b, 0x77, 0xf9, 0xdb, 0x05, 0xce, 0x9d, + 0x7d, 0x53, 0xff, 0xf3, 0x60, 0xc3, 0x65, 0x2e, 0x7a, 0x05, 0x16, 0x85, 0x2c, 0xe3, 0xbf, 0x26, + 0xa4, 0x6c, 0xfe, 0xc0, 0xff, 0x20, 0x52, 0x36, 0x7f, 0xf0, 0xff, 0x10, 0x38, 0xd7, 0x79, 0xf3, + 0x83, 0x8f, 0x5b, 0xb9, 0x0f, 0x3f, 0x6e, 0xe5, 0x3e, 0xfd, 0xb8, 0x65, 0x7d, 0x7f, 0xaf, 0x65, + 0xfd, 0x7a, 0xaf, 0x65, 0xbd, 0xbf, 0xd7, 0xb2, 0x3e, 0xd8, 0x6b, 0x59, 0xff, 0xd8, 0x6b, 0x59, + 0xff, 0xdc, 0x6b, 0xe5, 0x3e, 0xdd, 0x6b, 0x59, 0xef, 0x7e, 0xd2, 0xca, 0x7d, 0xf0, 0x49, 0x2b, + 0xf7, 0xe1, 0x27, 0xad, 0xdc, 0x77, 0x1f, 0xbd, 0x7b, 0xc1, 0x2a, 0xdd, 0x62, 0x49, 0x7c, 0x3d, + 0xf9, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8a, 0xd4, 0xcd, 0x88, 0x1f, 0x23, 0x00, 0x00, } func (x Direction) String() string { @@ -4996,18 +5004,10 @@ func (this *DetectedLabelsRequest) Equal(that interface{}) bool { } else if this == nil { return false } - if that1.Start == nil { - if this.Start != nil { - return false - } - } else if !this.Start.Equal(*that1.Start) { + if !this.Start.Equal(that1.Start) { return false } - if that1.End == nil { - if this.End != nil { - return false - } - } else if !this.End.Equal(*that1.End) { + if !this.End.Equal(that1.End) { return false } if this.Query != that1.Query { @@ -5069,6 +5069,9 @@ func (this *DetectedLabel) Equal(that interface{}) bool { if this.Cardinality != that1.Cardinality { return false } + if !bytes.Equal(this.Sketch, that1.Sketch) { + return false + } return true } func (this *LabelToValuesResponse) GoString() string { @@ -5774,10 +5777,11 @@ func (this *DetectedLabel) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&logproto.DetectedLabel{") s = append(s, "Label: "+fmt.Sprintf("%#v", this.Label)+",\n") s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") + s = append(s, "Sketch: "+fmt.Sprintf("%#v", this.Sketch)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -8754,26 +8758,22 @@ func (m *DetectedLabelsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1a } - if m.End != nil { - n26, err26 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.End, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.End):]) - if err26 != nil { - return 0, err26 - } - i -= n26 - i = encodeVarintLogproto(dAtA, i, uint64(n26)) - i-- - dAtA[i] = 0x12 + n26, err26 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.End, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.End):]) + if err26 != nil { + return 0, err26 } - if m.Start != nil { - n27, err27 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start):]) - if err27 != nil { - return 0, err27 - } - i -= n27 - i = encodeVarintLogproto(dAtA, i, uint64(n27)) - i-- - dAtA[i] = 0xa + i -= n26 + i = encodeVarintLogproto(dAtA, i, uint64(n26)) + i-- + dAtA[i] = 0x12 + n27, err27 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Start, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Start):]) + if err27 != nil { + return 0, err27 } + i -= n27 + i = encodeVarintLogproto(dAtA, i, uint64(n27)) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } @@ -8834,6 +8834,13 @@ func (m *DetectedLabel) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Sketch) > 0 { + i -= len(m.Sketch) + copy(dAtA[i:], m.Sketch) + i = encodeVarintLogproto(dAtA, i, uint64(len(m.Sketch))) + i-- + dAtA[i] = 0x1a + } if m.Cardinality != 0 { i = encodeVarintLogproto(dAtA, i, uint64(m.Cardinality)) i-- @@ -9894,14 +9901,10 @@ func (m *DetectedLabelsRequest) Size() (n int) { } var l int _ = l - if m.Start != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Start) - n += 1 + l + sovLogproto(uint64(l)) - } - if m.End != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.End) - n += 1 + l + sovLogproto(uint64(l)) - } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Start) + n += 1 + l + sovLogproto(uint64(l)) + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.End) + n += 1 + l + sovLogproto(uint64(l)) l = len(m.Query) if l > 0 { n += 1 + l + sovLogproto(uint64(l)) @@ -9937,6 +9940,10 @@ func (m *DetectedLabel) Size() (n int) { if m.Cardinality != 0 { n += 1 + sovLogproto(uint64(m.Cardinality)) } + l = len(m.Sketch) + if l > 0 { + n += 1 + l + sovLogproto(uint64(l)) + } return n } @@ -10642,8 +10649,8 @@ func (this *DetectedLabelsRequest) String() string { return "nil" } s := strings.Join([]string{`&DetectedLabelsRequest{`, - `Start:` + strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1) + `,`, - `End:` + strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1) + `,`, + `Start:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Start), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `End:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.End), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `Query:` + fmt.Sprintf("%v", this.Query) + `,`, `}`, }, "") @@ -10671,6 +10678,7 @@ func (this *DetectedLabel) String() string { s := strings.Join([]string{`&DetectedLabel{`, `Label:` + fmt.Sprintf("%v", this.Label) + `,`, `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, + `Sketch:` + fmt.Sprintf("%v", this.Sketch) + `,`, `}`, }, "") return s @@ -17650,10 +17658,7 @@ func (m *DetectedLabelsRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Start == nil { - m.Start = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Start, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Start, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17686,10 +17691,7 @@ func (m *DetectedLabelsRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.End == nil { - m.End = new(time.Time) - } - if err := 
github_com_gogo_protobuf_types.StdTimeUnmarshal(m.End, dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.End, dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -17916,6 +17918,40 @@ func (m *DetectedLabel) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sketch", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogproto + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLogproto + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthLogproto + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sketch = append(m.Sketch[:0], dAtA[iNdEx:postIndex]...) + if m.Sketch == nil { + m.Sketch = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipLogproto(dAtA[iNdEx:]) diff --git a/pkg/logproto/logproto.proto b/pkg/logproto/logproto.proto index b9c3cd987c7aa..189cec5d948ca 100644 --- a/pkg/logproto/logproto.proto +++ b/pkg/logproto/logproto.proto @@ -478,11 +478,11 @@ message DetectedField { message DetectedLabelsRequest { google.protobuf.Timestamp start = 1 [ (gogoproto.stdtime) = true, - (gogoproto.nullable) = true + (gogoproto.nullable) = false ]; google.protobuf.Timestamp end = 2 [ (gogoproto.stdtime) = true, - (gogoproto.nullable) = true + (gogoproto.nullable) = false ]; string query = 3; } @@ -494,4 +494,5 @@ message DetectedLabelsResponse { message DetectedLabel { string label = 1; uint64 cardinality = 2; + bytes sketch = 3 [(gogoproto.jsontag) = "sketch,omitempty"]; } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index bafbe334cdf75..f2fe80566b461 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -5,7 +5,6 @@ import ( "flag" "fmt" "net/http" - "regexp" "sort" "strconv" "time" @@ -14,6 +13,7 @@ import ( "github.com/dustin/go-humanize" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/google/uuid" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/tenant" "github.com/opentracing/opentracing-go" @@ -50,14 +50,10 @@ const ( // before checking if a new entry is available (to avoid spinning the CPU in a continuous // check loop) tailerWaitEntryThrottle = time.Second / 2 - - idPattern = `^(?:(?:[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})|(?:(?:\{)?[0-9a-fA-F]{8}(?:-?[0-9a-fA-F]{4}){3}-?[0-9a-fA-F]{12}(?:\})?)|(\d+(?:\.\d+)?))$` ) var ( nowFunc = func() time.Time { return time.Now() } - - idRegexp = regexp.MustCompile(idPattern) ) type interval struct { @@ -921,7 +917,6 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto. if err != nil { return nil, err } - var detectedLabels []*logproto.DetectedLabel staticLabels := map[string]struct{}{"cluster": {}, "namespace": {}, "instance": {}, "pod": {}} // Enforce the query timeout while querying backends @@ -930,24 +925,26 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto. 
defer cancel() g, ctx := errgroup.WithContext(ctx) - if *req.Start, *req.End, err = validateQueryTimeRangeLimits(ctx, userID, q.limits, *req.Start, *req.End); err != nil { + if req.Start, req.End, err = validateQueryTimeRangeLimits(ctx, userID, q.limits, req.Start, req.End); err != nil { return nil, err } - ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(*req.Start, *req.End) + ingesterQueryInterval, storeQueryInterval := q.buildQueryIntervals(req.Start, req.End) + // Fetch labels from ingesters var ingesterLabels *logproto.LabelToValuesResponse if !q.cfg.QueryStoreOnly && ingesterQueryInterval != nil { g.Go(func() error { var err error splitReq := *req - splitReq.Start = &ingesterQueryInterval.start - splitReq.End = &ingesterQueryInterval.end + splitReq.Start = ingesterQueryInterval.start + splitReq.End = ingesterQueryInterval.end ingesterLabels, err = q.ingesterQuerier.DetectedLabel(ctx, &splitReq) return err }) } + // Fetch labels from the store storeLabelsMap := make(map[string][]string) if !q.cfg.QueryIngesterOnly && storeQueryInterval != nil { var matchers []*labels.Matcher @@ -967,9 +964,7 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto. if err != nil { return err } - if q.isLabelRelevant(label, values, staticLabels) { - storeLabelsMap[label] = values - } + storeLabelsMap[label] = values } return err }) @@ -985,40 +980,58 @@ func (q *SingleTenantQuerier) DetectedLabels(ctx context.Context, req *logproto. }, nil } - if ingesterLabels != nil { - // append static labels before so they are in sorted order - for l := range staticLabels { - if values, present := ingesterLabels.Labels[l]; present { - detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: l, Cardinality: uint64(len(values.Values))}) - } - } + return &logproto.DetectedLabelsResponse{ + DetectedLabels: countLabelsAndCardinality(storeLabelsMap, ingesterLabels, staticLabels), + }, nil +} + +func countLabelsAndCardinality(storeLabelsMap map[string][]string, ingesterLabels *logproto.LabelToValuesResponse, staticLabels map[string]struct{}) []*logproto.DetectedLabel { + dlMap := make(map[string]*parsedFields) - for label, values := range ingesterLabels.Labels { - if q.isLabelRelevant(label, values.Values, staticLabels) { - combinedValues := values.Values - storeValues, storeHasLabel := storeLabelsMap[label] - if storeHasLabel { - combinedValues = append(combinedValues, storeValues...) + if ingesterLabels != nil { + for label, val := range ingesterLabels.Labels { + if _, isStatic := staticLabels[label]; isStatic || !containsAllIDTypes(val.Values) { + _, ok := dlMap[label] + if !ok { + dlMap[label] = newParsedLabels() } - slices.Sort(combinedValues) - uniqueValues := slices.Compact(combinedValues) - // TODO(shantanu): There's a bug here. Unique values can go above 50. 
Will need a bit of refactoring - detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: label, Cardinality: uint64(len(uniqueValues))}) - delete(storeLabelsMap, label) + parsedFields := dlMap[label] + for _, v := range val.Values { + parsedFields.Insert(v) + } } } } for label, values := range storeLabelsMap { - slices.Sort(values) - uniqueValues := slices.Compact(values) - detectedLabels = append(detectedLabels, &logproto.DetectedLabel{Label: label, Cardinality: uint64(len(uniqueValues))}) + if _, isStatic := staticLabels[label]; isStatic || !containsAllIDTypes(values) { + _, ok := dlMap[label] + if !ok { + dlMap[label] = newParsedLabels() + } + + parsedFields := dlMap[label] + for _, v := range values { + parsedFields.Insert(v) + } + } } - return &logproto.DetectedLabelsResponse{ - DetectedLabels: detectedLabels, - }, nil + var detectedLabels []*logproto.DetectedLabel + for k, v := range dlMap { + sketch, err := v.sketch.MarshalBinary() + if err != nil { + // TODO: add log here + continue + } + detectedLabels = append(detectedLabels, &logproto.DetectedLabel{ + Label: k, + Cardinality: v.Estimate(), + Sketch: sketch, + }) + } + return detectedLabels } type PatterQuerier interface { @@ -1037,24 +1050,15 @@ func (q *SingleTenantQuerier) Patterns(ctx context.Context, req *logproto.QueryP return res, err } -// isLabelRelevant returns if the label is relevant for logs app. A label is relevant if it is not of any numeric, UUID or GUID type -// It is also not relevant to return if the values are less than 1 or beyond 50. -func (q *SingleTenantQuerier) isLabelRelevant(label string, values []string, staticLabels map[string]struct{}) bool { - cardinality := len(values) - _, isStaticLabel := staticLabels[label] - if isStaticLabel || (cardinality < 2 || cardinality > 50) || - containsAllIDTypes(values) { - return false - } - - return true -} - // containsAllIDTypes filters out all UUID, GUID and numeric types. 
Returns false if even one value is not of the type func containsAllIDTypes(values []string) bool { for _, v := range values { - if !idRegexp.MatchString(v) { - return false + _, err := strconv.ParseFloat(v, 64) + if err != nil { + _, err = uuid.Parse(v) + if err != nil { + return false + } } } @@ -1141,6 +1145,13 @@ func newParsedFields(parser *string) *parsedFields { } } +func newParsedLabels() *parsedFields { + return &parsedFields{ + sketch: hyperloglog.New(), + fieldType: logproto.DetectedFieldString, + } +} + func (p *parsedFields) Insert(value string) { p.sketch.Insert([]byte(value)) } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 66370c34460be..a787616efeeee 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1379,81 +1379,6 @@ func (d *mockDeleteGettter) GetAllDeleteRequestsForUser(_ context.Context, userI return d.results, nil } -func TestQuerier_isLabelRelevant(t *testing.T) { - for _, tc := range []struct { - name string - label string - values []string - expected bool - }{ - { - label: "uuidv4 values are not relevant", - values: []string{"751e8ee6-b377-4b2e-b7b5-5508fbe980ef", "6b7e2663-8ecb-42e1-8bdc-0c5de70185b3", "2e1e67ff-be4f-47b8-aee1-5d67ff1ddabf", "c95b2d62-74ed-4ed7-a8a1-eb72fc67946e"}, - expected: false, - }, - { - label: "guid values are not relevant", - values: []string{"57808f62-f117-4a22-84a0-bc3282c7f106", "5076e837-cd8d-4dd7-95ff-fecb087dccf6", "2e2a6554-1744-4399-b89a-88ae79c27096", "d3c31248-ec0c-4bc4-b11c-8fb1cfb42e62"}, - expected: false, - }, - { - label: "integer values are not relevant", - values: []string{"1", "2", "3", "4"}, - expected: false, - }, - { - label: "string values are relevant", - values: []string{"ingester", "querier", "query-frontend", "index-gateway"}, - expected: true, - }, - { - label: "guid with braces are not relevant", - values: []string{"{E9550CF7-58D9-48B9-8845-D9800C651AAC}", "{1617921B-1749-4FF0-A058-31AFB5D98149}", "{C119D92E-A4B9-48A3-A92C-6CA8AA8A6CCC}", "{228AAF1D-2DE7-4909-A4E9-246A7FA9D988}"}, - expected: false, - }, - { - label: "float values are not relevant", - values: []string{"1.2", "2.5", "3.3", "4.1"}, - expected: false, - }, - } { - t.Run(tc.name, func(t *testing.T) { - querier := &SingleTenantQuerier{cfg: mockQuerierConfig()} - assert.Equal(t, tc.expected, querier.isLabelRelevant(tc.label, tc.values, map[string]struct{}{"host": {}, "cluster": {}, "namespace": {}, "instance": {}, "pod": {}})) - }) - - } -} - -func TestQuerier_containsAllIDTypes(t *testing.T) { - for _, tc := range []struct { - name string - values []string - expected bool - }{ - { - name: "all uuidv4 values are valid", - values: []string{"751e8ee6-b377-4b2e-b7b5-5508fbe980ef", "6b7e2663-8ecb-42e1-8bdc-0c5de70185b3", "2e1e67ff-be4f-47b8-aee1-5d67ff1ddabf", "c95b2d62-74ed-4ed7-a8a1-eb72fc67946e"}, - expected: true, - }, - { - name: "one uuidv4 values are invalid", - values: []string{"w", "5076e837-cd8d-4dd7-95ff-fecb087dccf6", "2e2a6554-1744-4399-b89a-88ae79c27096", "d3c31248-ec0c-4bc4-b11c-8fb1cfb42e62"}, - expected: false, - }, - { - name: "all uuidv4 values are invalid", - values: []string{"w", "x", "y", "z"}, - expected: false, - }, - } { - t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, tc.expected, containsAllIDTypes(tc.values)) - }) - - } -} - func TestQuerier_DetectedLabels(t *testing.T) { manyValues := []string{} now := time.Now() @@ -1469,8 +1394,8 @@ func TestQuerier_DetectedLabels(t *testing.T) { conf.IngesterQueryStoreMaxLookback = 0 request := logproto.DetectedLabelsRequest{ 
- Start: &now, - End: &now, + Start: now, + End: now, Query: "", } @@ -1507,8 +1432,11 @@ func TestQuerier_DetectedLabels(t *testing.T) { detectedLabels := resp.DetectedLabels assert.Len(t, detectedLabels, 3) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel", Cardinality: 2}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "ingesterLabel", Cardinality: 3}) + expectedCardinality := map[string]uint64{"storeLabel": 2, "ingesterLabel": 3, "cluster": 1} + for _, d := range detectedLabels { + card := expectedCardinality[d.Label] + assert.Equal(t, d.Cardinality, card, "Expected cardinality mismatch for: ", d.Label) + } }) t.Run("when both store and ingester responses are present, duplicates are removed", func(t *testing.T) { @@ -1547,9 +1475,12 @@ func TestQuerier_DetectedLabels(t *testing.T) { detectedLabels := resp.DetectedLabels assert.Len(t, detectedLabels, 4) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel", Cardinality: 2}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "ingesterLabel", Cardinality: 3}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "commonLabel", Cardinality: 5}) + + expectedCardinality := map[string]uint64{"storeLabel": 2, "ingesterLabel": 3, "cluster": 1, "commonLabel": 5} + for _, d := range detectedLabels { + card := expectedCardinality[d.Label] + assert.Equal(t, d.Cardinality, card, "Expected cardinality mismatch for: ", d.Label) + } }) t.Run("returns a response when ingester data is empty", func(t *testing.T) { @@ -1579,8 +1510,11 @@ func TestQuerier_DetectedLabels(t *testing.T) { detectedLabels := resp.DetectedLabels assert.Len(t, detectedLabels, 2) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel1", Cardinality: 2}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "storeLabel2", Cardinality: 2}) + expectedCardinality := map[string]uint64{"storeLabel1": 2, "storeLabel2": 2} + for _, d := range detectedLabels { + card := expectedCardinality[d.Label] + assert.Equal(t, d.Cardinality, card, "Expected cardinality mismatch for: ", d.Label) + } }) t.Run("returns a response when store data is empty", func(t *testing.T) { @@ -1611,8 +1545,11 @@ func TestQuerier_DetectedLabels(t *testing.T) { detectedLabels := resp.DetectedLabels assert.Len(t, detectedLabels, 2) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "cluster", Cardinality: 1}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "ingesterLabel", Cardinality: 3}) + expectedCardinality := map[string]uint64{"cluster": 1, "ingesterLabel": 3} + for _, d := range detectedLabels { + card := expectedCardinality[d.Label] + assert.Equal(t, d.Cardinality, card, "Expected cardinality mismatch for: ", d.Label) + } }) t.Run("id types like uuids, guids and numbers are not relevant detected labels", func(t *testing.T) { @@ -1646,36 +1583,6 @@ func TestQuerier_DetectedLabels(t *testing.T) { assert.Len(t, detectedLabels, 0) }) - t.Run("labels with more than required cardinality are not relevant", func(t *testing.T) { - ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ - "less-than-m-values": {Values: []string{"val1"}}, - "more-than-n-values": {Values: manyValues}, - }} - - ingesterClient := newQuerierClientMock() - storeClient := newStoreMock() - - ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
- Return(&ingesterResponse, nil) - storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return([]string{}, nil) - - querier, err := newQuerier( - conf, - mockIngesterClientConfig(), - newIngesterClientMockFactory(ingesterClient), - mockReadRingWithOneActiveIngester(), - &mockDeleteGettter{}, - storeClient, limits) - require.NoError(t, err) - - resp, err := querier.DetectedLabels(ctx, &request) - require.NoError(t, err) - - detectedLabels := resp.DetectedLabels - assert.Len(t, detectedLabels, 0) - }) - t.Run("static labels are always returned no matter their cardinality or value types", func(t *testing.T) { ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ "cluster": {Values: []string{"val1"}}, @@ -1691,8 +1598,8 @@ func TestQuerier_DetectedLabels(t *testing.T) { storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return([]string{}, nil) request := logproto.DetectedLabelsRequest{ - Start: &now, - End: &now, + Start: now, + End: now, Query: "", } @@ -1710,9 +1617,11 @@ func TestQuerier_DetectedLabels(t *testing.T) { detectedLabels := resp.DetectedLabels assert.Len(t, detectedLabels, 3) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "cluster", Cardinality: 1}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "pod", Cardinality: 4}) - assert.Contains(t, detectedLabels, &logproto.DetectedLabel{Label: "namespace", Cardinality: 60}) + expectedCardinality := map[string]uint64{"cluster": 1, "pod": 4, "namespace": 60} + for _, d := range detectedLabels { + card := expectedCardinality[d.Label] + assert.Equal(t, d.Cardinality, card, "Expected cardinality mismatch for: ", d.Label) + } }) t.Run("no panics with ingester response is nil", func(t *testing.T) { @@ -1724,8 +1633,8 @@ func TestQuerier_DetectedLabels(t *testing.T) { storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). Return([]string{}, nil) request := logproto.DetectedLabelsRequest{ - Start: &now, - End: &now, + Start: now, + End: now.Add(2 * time.Hour), Query: "", } @@ -1742,3 +1651,49 @@ func TestQuerier_DetectedLabels(t *testing.T) { require.NoError(t, err) }) } + +func BenchmarkQuerierDetectedLabels(b *testing.B) { + now := time.Now() + + limits, _ := validation.NewOverrides(defaultLimitsTestConfig(), nil) + ctx := user.InjectOrgID(context.Background(), "test") + + conf := mockQuerierConfig() + conf.IngesterQueryStoreMaxLookback = 0 + + request := logproto.DetectedLabelsRequest{ + Start: now, + End: now, + Query: "", + } + ingesterResponse := logproto.LabelToValuesResponse{Labels: map[string]*logproto.UniqueLabelValues{ + "cluster": {Values: []string{"ingester"}}, + "ingesterLabel": {Values: []string{"abc", "def", "ghi", "abc"}}, + }} + + ingesterClient := newQuerierClientMock() + storeClient := newStoreMock() + + ingesterClient.On("GetDetectedLabels", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(&ingesterResponse, nil) + storeClient.On("LabelNamesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return([]string{"storeLabel"}, nil). + On("LabelValuesForMetricName", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, "storeLabel", mock.Anything). 
+ Return([]string{"val1", "val2"}, nil) + + querier, _ := newQuerier( + conf, + mockIngesterClientConfig(), + newIngesterClientMockFactory(ingesterClient), + mockReadRingWithOneActiveIngester(), + &mockDeleteGettter{}, + storeClient, limits) + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + _, err := querier.DetectedLabels(ctx, &request) + assert.NoError(b, err) + } +} diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 6626d08f87753..01ff8772a4c75 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -272,19 +272,19 @@ func (r *DetectedLabelsRequest) AsProto() *logproto.DetectedLabelsRequest { } func (r *DetectedLabelsRequest) GetEnd() time.Time { - return *r.End + return r.End } func (r *DetectedLabelsRequest) GetEndTs() time.Time { - return *r.End + return r.End } func (r *DetectedLabelsRequest) GetStart() time.Time { - return *r.Start + return r.Start } func (r *DetectedLabelsRequest) GetStartTs() time.Time { - return *r.Start + return r.Start } func (r *DetectedLabelsRequest) GetStep() int64 { @@ -293,8 +293,8 @@ func (r *DetectedLabelsRequest) GetStep() int64 { func (r *DetectedLabelsRequest) WithStartEnd(s, e time.Time) queryrangebase.Request { clone := *r - clone.Start = &s - clone.End = &e + clone.Start = s + clone.End = e return &clone } @@ -1546,6 +1546,25 @@ func (Codec) MergeResponse(responses ...queryrangebase.Response) (queryrangebase }, Headers: headers, }, nil + case *DetectedLabelsResponse: + resp0 := responses[0].(*DetectedLabelsResponse) + headers := resp0.Headers + var labels []*logproto.DetectedLabel + + for _, r := range responses { + labels = append(labels, r.(*DetectedLabelsResponse).Response.DetectedLabels...) + } + mergedLabels, err := detected.MergeLabels(labels) + if err != nil { + return nil, err + } + + return &DetectedLabelsResponse{ + Response: &logproto.DetectedLabelsResponse{ + DetectedLabels: mergedLabels, + }, + Headers: headers, + }, nil default: return nil, fmt.Errorf("unknown response type (%T) in merging responses", responses[0]) } @@ -1744,6 +1763,10 @@ func ParamsFromRequest(req queryrangebase.Request) (logql.Params, error) { return ¶msDetectedFieldsWrapper{ DetectedFieldsRequest: r, }, nil + case *DetectedLabelsRequest: + return ¶msDetectedLabelsWrapper{ + DetectedLabelsRequest: r, + }, nil default: return nil, fmt.Errorf("expected one of the *LokiRequest, *LokiInstantRequest, *LokiSeriesRequest, *LokiLabelNamesRequest, *DetectedFieldsRequest, got (%T)", r) } @@ -1968,6 +1991,51 @@ func (p paramsDetectedFieldsWrapper) Shards() []string { return make([]string, 0) } +type paramsDetectedLabelsWrapper struct { + *DetectedLabelsRequest +} + +func (p paramsDetectedLabelsWrapper) QueryString() string { + return p.GetQuery() +} + +func (p paramsDetectedLabelsWrapper) GetExpression() syntax.Expr { + expr, err := syntax.ParseExpr(p.GetQuery()) + if err != nil { + return nil + } + + return expr +} + +func (p paramsDetectedLabelsWrapper) Start() time.Time { + return p.GetStartTs() +} + +func (p paramsDetectedLabelsWrapper) End() time.Time { + return p.GetEndTs() +} + +func (p paramsDetectedLabelsWrapper) Step() time.Duration { + return time.Duration(p.GetStep() * 1e6) +} + +func (p paramsDetectedLabelsWrapper) Interval() time.Duration { + return 0 +} + +func (p paramsDetectedLabelsWrapper) Direction() logproto.Direction { + return logproto.BACKWARD +} +func (p paramsDetectedLabelsWrapper) Limit() uint32 { return 0 } +func (p paramsDetectedLabelsWrapper) Shards() []string { 
+ return make([]string, 0) +} + +func (p paramsDetectedLabelsWrapper) GetStoreChunks() *logproto.ChunkRefGroup { + return nil +} + func (p paramsDetectedFieldsWrapper) GetStoreChunks() *logproto.ChunkRefGroup { return nil } @@ -2036,6 +2104,10 @@ func NewEmptyResponse(r queryrangebase.Request) (queryrangebase.Response, error) return &VolumeResponse{ Response: &logproto.VolumeResponse{}, }, nil + case *DetectedLabelsRequest: + return &DetectedLabelsResponse{ + Response: &logproto.DetectedLabelsResponse{}, + }, nil default: return nil, fmt.Errorf("unsupported request type %T", req) } diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index ff7c4ba4dbff1..61da06929fe14 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -261,8 +261,8 @@ func NewMiddleware( schema, metrics, indexStatsTripperware, - metricsNamespace) - + metricsNamespace, + codec, limits, iqo) if err != nil { return nil, nil, err } @@ -284,16 +284,17 @@ func NewMiddleware( }), StopperWrapper{resultsCache, statsCache, volumeCache}, nil } -func NewDetectedLabelsTripperware(cfg Config, opts logql.EngineOpts, logger log.Logger, l Limits, schema config.SchemaConfig, metrics *Metrics, mw base.Middleware, namespace string) (base.Middleware, error) { +func NewDetectedLabelsTripperware(cfg Config, opts logql.EngineOpts, logger log.Logger, l Limits, schema config.SchemaConfig, metrics *Metrics, mw base.Middleware, namespace string, merger base.Merger, limits Limits, iqo util.IngesterQueryOptions) (base.Middleware, error) { return base.MiddlewareFunc(func(next base.Handler) base.Handler { statsHandler := mw.Wrap(next) + splitter := newDefaultSplitter(limits, iqo) queryRangeMiddleware := []base.Middleware{ StatsCollectorMiddleware(), NewLimitsMiddleware(l), NewQuerySizeLimiterMiddleware(schema.Configs, opts, logger, l, statsHandler), base.InstrumentMiddleware("split_by_interval", metrics.InstrumentMiddlewareMetrics), - } + SplitByIntervalMiddleware(schema.Configs, limits, merger, splitter, metrics.SplitByMetrics)} // The sharding middleware takes care of enforcing this limit for both shardable and non-shardable queries. // If we are not using sharding, we enforce the limit by adding this middleware after time splitting. @@ -307,11 +308,38 @@ func NewDetectedLabelsTripperware(cfg Config, opts logql.EngineOpts, logger log. base.NewRetryMiddleware(logger, cfg.MaxRetries, metrics.RetryMiddlewareMetrics, namespace), ) } - - return NewLimitedRoundTripper(next, l, schema.Configs, queryRangeMiddleware...) + limitedRt := NewLimitedRoundTripper(next, l, schema.Configs, queryRangeMiddleware...) 
+ return NewDetectedLabelsCardinalityFilter(limitedRt) }), nil } +func NewDetectedLabelsCardinalityFilter(rt queryrangebase.Handler) queryrangebase.Handler { + return queryrangebase.HandlerFunc( + func(ctx context.Context, req queryrangebase.Request) (queryrangebase.Response, error) { + res, err := rt.Do(ctx, req) + if err != nil { + return nil, err + } + + resp, ok := res.(*DetectedLabelsResponse) + if !ok { + return res, nil + } + + var result []*logproto.DetectedLabel + + for _, dl := range resp.Response.DetectedLabels { + if dl.Cardinality > 2 && dl.Cardinality < 50 { + result = append(result, &logproto.DetectedLabel{Label: dl.Label, Cardinality: dl.Cardinality}) + } + } + return &DetectedLabelsResponse{ + Response: &logproto.DetectedLabelsResponse{DetectedLabels: result}, + Headers: resp.Headers, + }, nil + }) +} + type roundTripper struct { logger log.Logger @@ -442,7 +470,16 @@ func (r roundTripper) Do(ctx context.Context, req base.Request) (base.Response, ) return r.detectedFields.Do(ctx, req) - // TODO(shantanu): Add DetectedLabels + case *DetectedLabelsRequest: + level.Info(logger).Log( + "msg", "executing query", + "type", "detected_label", + "end", op.End, + "length", op.End.Sub(op.Start), + "query", op.Query, + "start", op.Start, + ) + return r.detectedLabels.Do(ctx, req) default: return r.next.Do(ctx, req) } diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go index fc71742859798..7dfeb729e149a 100644 --- a/pkg/querier/queryrange/split_by_interval.go +++ b/pkg/querier/queryrange/split_by_interval.go @@ -228,7 +228,7 @@ func (h *splitByInterval) Do(ctx context.Context, r queryrangebase.Request) (que for i, j := 0, len(intervals)-1; i < j; i, j = i+1, j-1 { intervals[i], intervals[j] = intervals[j], intervals[i] } - case *LokiSeriesRequest, *LabelRequest, *logproto.IndexStatsRequest, *logproto.VolumeRequest, *logproto.ShardsRequest: + case *LokiSeriesRequest, *LabelRequest, *logproto.IndexStatsRequest, *logproto.VolumeRequest, *logproto.ShardsRequest, *DetectedLabelsRequest: // Set this to 0 since this is not used in Series/Labels/Index Request. 
limit = 0 default: diff --git a/pkg/querier/queryrange/splitters.go b/pkg/querier/queryrange/splitters.go index 42a81f6defd39..fe3453b2ee717 100644 --- a/pkg/querier/queryrange/splitters.go +++ b/pkg/querier/queryrange/splitters.go @@ -1,10 +1,14 @@ package queryrange import ( + "fmt" "time" + "github.com/go-kit/log/level" "github.com/prometheus/common/model" + util_log "github.com/grafana/loki/v3/pkg/util/log" + "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase" "github.com/grafana/loki/v3/pkg/util" @@ -109,7 +113,19 @@ func (s *defaultSplitter) split(execTime time.Time, tenantIDs []string, req quer path: r.path, }) } + case *DetectedLabelsRequest: + factory = func(start, end time.Time) { + reqs = append(reqs, &DetectedLabelsRequest{ + DetectedLabelsRequest: logproto.DetectedLabelsRequest{ + Start: start, + End: end, + Query: r.Query, + }, + path: r.path, + }) + } default: + level.Warn(util_log.Logger).Log("msg", fmt.Sprintf("splitter: unsupported request type: %T", req)) return nil, nil } diff --git a/pkg/querier/queryrange/stats.go b/pkg/querier/queryrange/stats.go index 384ee7ceed53c..67ca803d52964 100644 --- a/pkg/querier/queryrange/stats.go +++ b/pkg/querier/queryrange/stats.go @@ -179,6 +179,10 @@ func StatsCollectorMiddleware() queryrangebase.Middleware { responseStats = &stats.Result{} // TODO: support stats in query patterns totalEntries = len(r.Response.Series) queryType = queryTypeQueryPatterns + case *DetectedLabelsResponse: + responseStats = &stats.Result{} + totalEntries = 1 + queryType = queryTypeDetectedLabels default: level.Warn(logger).Log("msg", fmt.Sprintf("cannot compute stats, unexpected type: %T", resp)) } diff --git a/pkg/storage/detected/labels.go b/pkg/storage/detected/labels.go new file mode 100644 index 0000000000000..66b721a79b800 --- /dev/null +++ b/pkg/storage/detected/labels.go @@ -0,0 +1,64 @@ +package detected + +import ( + "github.com/axiomhq/hyperloglog" + + "github.com/grafana/loki/v3/pkg/logproto" +) + +type UnmarshaledDetectedLabel struct { + Label string + Sketch *hyperloglog.Sketch +} + +func unmarshalDetectedLabel(l *logproto.DetectedLabel) (*UnmarshaledDetectedLabel, error) { + sketch := hyperloglog.New() + err := sketch.UnmarshalBinary(l.Sketch) + if err != nil { + return nil, err + } + return &UnmarshaledDetectedLabel{ + Label: l.Label, + Sketch: sketch, + }, nil +} + +func (m *UnmarshaledDetectedLabel) Merge(dl *logproto.DetectedLabel) error { + sketch := hyperloglog.New() + err := sketch.UnmarshalBinary(dl.Sketch) + if err != nil { + return err + } + return m.Sketch.Merge(sketch) +} + +func MergeLabels(labels []*logproto.DetectedLabel) (result []*logproto.DetectedLabel, err error) { + mergedLabels := make(map[string]*UnmarshaledDetectedLabel) + for _, label := range labels { + l, ok := mergedLabels[label.Label] + if !ok { + unmarshaledLabel, err := unmarshalDetectedLabel(label) + if err != nil { + return nil, err + } + mergedLabels[label.Label] = unmarshaledLabel + } else { + err := l.Merge(label) + if err != nil { + return nil, err + } + } + } + + for _, label := range mergedLabels { + detectedLabel := &logproto.DetectedLabel{ + Label: label.Label, + Cardinality: label.Sketch.Estimate(), + Sketch: nil, + } + + result = append(result, detectedLabel) + } + + return +} From 4a5edf1a2af9e8af1842dc8d9b5482659d61031e Mon Sep 17 00:00:00 2001 From: benclive Date: Wed, 15 May 2024 20:22:19 +0100 Subject: [PATCH 32/47] perf: Replace channel check with atomic bool in tailer.send() (#12976) --- 
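Reviewer note, outside the applied diff: below is a minimal, self-contained sketch of the pattern this commit switches to — reading a closed flag with sync/atomic.Bool instead of doing a select on closeChan in the hot send path. It assumes only the Go standard library; the worker type and its fields are illustrative stand-ins, not Loki's tailer.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type worker struct {
	closeChan chan struct{} // still closed on shutdown so blocked goroutines can wake
	closeOnce sync.Once
	closed    atomic.Bool // cheap flag for hot-path isClosed() checks
}

func newWorker() *worker {
	return &worker{closeChan: make(chan struct{})}
}

// isClosed is called on every send; a single atomic load avoids the
// scheduler work of a select with a default case.
func (w *worker) isClosed() bool {
	return w.closed.Load()
}

func (w *worker) close() {
	w.closeOnce.Do(func() {
		w.closed.Store(true) // flip the flag first so senders bail out early
		close(w.closeChan)   // then wake anything still waiting on the channel
	})
}

func main() {
	w := newWorker()
	fmt.Println(w.isClosed()) // false
	w.close()
	fmt.Println(w.isClosed()) // true
}

The design point, as in the diff below: close() keeps closing the channel so select-based waiters are still signalled, while per-entry send checks pay only an atomic load.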
pkg/ingester/tailer.go | 13 ++++++------- pkg/ingester/tailer_test.go | 23 +++++++++++++++++++++++ 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go index 80cceba78fca6..441c688612d9e 100644 --- a/pkg/ingester/tailer.go +++ b/pkg/ingester/tailer.go @@ -4,6 +4,7 @@ import ( "encoding/binary" "hash/fnv" "sync" + "sync/atomic" "time" "github.com/go-kit/log/level" @@ -46,6 +47,7 @@ type tailer struct { // and the loop and senders should stop closeChan chan struct{} closeOnce sync.Once + closed atomic.Bool blockedAt *time.Time blockedMtx sync.RWMutex @@ -74,6 +76,7 @@ func newTailer(orgID string, expr syntax.LogSelectorExpr, conn TailServer, maxDr maxDroppedStreams: maxDroppedStreams, id: generateUniqueID(orgID, expr.String()), closeChan: make(chan struct{}), + closed: atomic.Bool{}, pipeline: pipeline, }, nil } @@ -227,17 +230,13 @@ func isMatching(lbs labels.Labels, matchers []*labels.Matcher) bool { } func (t *tailer) isClosed() bool { - select { - case <-t.closeChan: - return true - default: - return false - } + return t.closed.Load() } func (t *tailer) close() { t.closeOnce.Do(func() { - // Signal the close channel + // Signal the close channel & flip the atomic bool so tailers will exit + t.closed.Store(true) close(t.closeChan) // We intentionally do not close sendChan in order to avoid a panic on diff --git a/pkg/ingester/tailer_test.go b/pkg/ingester/tailer_test.go index 1f49ec0095086..f52e87040ce37 100644 --- a/pkg/ingester/tailer_test.go +++ b/pkg/ingester/tailer_test.go @@ -17,6 +17,7 @@ import ( ) func TestTailer_RoundTrip(t *testing.T) { + t.Parallel() server := &fakeTailServer{} lbs := makeRandomLabels() @@ -66,6 +67,7 @@ func TestTailer_RoundTrip(t *testing.T) { } func TestTailer_sendRaceConditionOnSendWhileClosing(t *testing.T) { + t.Parallel() runs := 100 stream := logproto.Stream{ @@ -103,6 +105,7 @@ func TestTailer_sendRaceConditionOnSendWhileClosing(t *testing.T) { } func Test_dropstream(t *testing.T) { + t.Parallel() maxDroppedStreams := 10 entry := logproto.Entry{Timestamp: time.Now(), Line: "foo"} @@ -224,6 +227,7 @@ func Test_TailerSendRace(t *testing.T) { } func Test_IsMatching(t *testing.T) { + t.Parallel() for _, tt := range []struct { name string lbs labels.Labels @@ -241,6 +245,7 @@ func Test_IsMatching(t *testing.T) { } func Test_StructuredMetadata(t *testing.T) { + t.Parallel() lbs := makeRandomLabels() for _, tc := range []struct { @@ -364,3 +369,21 @@ func Test_StructuredMetadata(t *testing.T) { }) } } + +func Benchmark_isClosed(t *testing.B) { + var server fakeTailServer + expr, err := syntax.ParseLogSelector(`{app="foo"}`, true) + require.NoError(t, err) + tail, err := newTailer("foo", expr, &server, 0) + require.NoError(t, err) + + require.Equal(t, false, tail.isClosed()) + + t.ResetTimer() + for i := 0; i < t.N; i++ { + tail.isClosed() + } + + tail.close() + require.Equal(t, true, tail.isClosed()) +} From 87288d37f9e9c1e90295bf785adbc4bfdb66fb30 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Thu, 16 May 2024 10:16:28 +0200 Subject: [PATCH 33/47] feat(reporting): Report cpu usage (#12970) --- go.mod | 12 +- go.sum | 17 + pkg/analytics/reporter.go | 37 + pkg/analytics/reporter_test.go | 14 + pkg/analytics/stats.go | 6 +- vendor/github.com/go-ole/go-ole/.travis.yml | 8 + vendor/github.com/go-ole/go-ole/ChangeLog.md | 49 + vendor/github.com/go-ole/go-ole/LICENSE | 21 + vendor/github.com/go-ole/go-ole/README.md | 46 + vendor/github.com/go-ole/go-ole/appveyor.yml | 54 + 
vendor/github.com/go-ole/go-ole/com.go | 344 +++++ vendor/github.com/go-ole/go-ole/com_func.go | 174 +++ vendor/github.com/go-ole/go-ole/connect.go | 192 +++ vendor/github.com/go-ole/go-ole/constants.go | 153 +++ vendor/github.com/go-ole/go-ole/error.go | 51 + vendor/github.com/go-ole/go-ole/error_func.go | 8 + .../github.com/go-ole/go-ole/error_windows.go | 24 + vendor/github.com/go-ole/go-ole/guid.go | 284 ++++ .../go-ole/go-ole/iconnectionpoint.go | 20 + .../go-ole/go-ole/iconnectionpoint_func.go | 21 + .../go-ole/go-ole/iconnectionpoint_windows.go | 43 + .../go-ole/iconnectionpointcontainer.go | 17 + .../go-ole/iconnectionpointcontainer_func.go | 11 + .../iconnectionpointcontainer_windows.go | 25 + vendor/github.com/go-ole/go-ole/idispatch.go | 94 ++ .../go-ole/go-ole/idispatch_func.go | 19 + .../go-ole/go-ole/idispatch_windows.go | 202 +++ .../github.com/go-ole/go-ole/ienumvariant.go | 19 + .../go-ole/go-ole/ienumvariant_func.go | 19 + .../go-ole/go-ole/ienumvariant_windows.go | 63 + .../github.com/go-ole/go-ole/iinspectable.go | 18 + .../go-ole/go-ole/iinspectable_func.go | 15 + .../go-ole/go-ole/iinspectable_windows.go | 72 + .../go-ole/go-ole/iprovideclassinfo.go | 21 + .../go-ole/go-ole/iprovideclassinfo_func.go | 7 + .../go-ole/iprovideclassinfo_windows.go | 21 + vendor/github.com/go-ole/go-ole/itypeinfo.go | 34 + .../go-ole/go-ole/itypeinfo_func.go | 7 + .../go-ole/go-ole/itypeinfo_windows.go | 21 + vendor/github.com/go-ole/go-ole/iunknown.go | 57 + .../github.com/go-ole/go-ole/iunknown_func.go | 19 + .../go-ole/go-ole/iunknown_windows.go | 58 + vendor/github.com/go-ole/go-ole/ole.go | 190 +++ .../go-ole/go-ole/oleutil/connection.go | 100 ++ .../go-ole/go-ole/oleutil/connection_func.go | 10 + .../go-ole/oleutil/connection_windows.go | 58 + .../go-ole/go-ole/oleutil/go-get.go | 6 + .../go-ole/go-ole/oleutil/oleutil.go | 127 ++ vendor/github.com/go-ole/go-ole/safearray.go | 27 + .../go-ole/go-ole/safearray_func.go | 211 +++ .../go-ole/go-ole/safearray_windows.go | 337 +++++ .../go-ole/go-ole/safearrayconversion.go | 140 ++ .../go-ole/go-ole/safearrayslices.go | 33 + vendor/github.com/go-ole/go-ole/utility.go | 101 ++ vendor/github.com/go-ole/go-ole/variables.go | 15 + vendor/github.com/go-ole/go-ole/variant.go | 105 ++ .../github.com/go-ole/go-ole/variant_386.go | 11 + .../github.com/go-ole/go-ole/variant_amd64.go | 12 + .../github.com/go-ole/go-ole/variant_arm.go | 11 + .../github.com/go-ole/go-ole/variant_arm64.go | 13 + .../go-ole/go-ole/variant_date_386.go | 22 + .../go-ole/go-ole/variant_date_amd64.go | 20 + .../go-ole/go-ole/variant_date_arm.go | 22 + .../go-ole/go-ole/variant_date_arm64.go | 23 + .../go-ole/go-ole/variant_ppc64le.go | 12 + .../github.com/go-ole/go-ole/variant_s390x.go | 12 + vendor/github.com/go-ole/go-ole/vt_string.go | 58 + vendor/github.com/go-ole/go-ole/winrt.go | 99 ++ vendor/github.com/go-ole/go-ole/winrt_doc.go | 36 + vendor/github.com/lufia/plan9stats/.gitignore | 12 + vendor/github.com/lufia/plan9stats/LICENSE | 29 + vendor/github.com/lufia/plan9stats/README.md | 2 + vendor/github.com/lufia/plan9stats/cpu.go | 288 ++++ vendor/github.com/lufia/plan9stats/doc.go | 2 + vendor/github.com/lufia/plan9stats/host.go | 303 +++++ vendor/github.com/lufia/plan9stats/int.go | 31 + vendor/github.com/lufia/plan9stats/opts.go | 21 + vendor/github.com/lufia/plan9stats/stats.go | 88 ++ .../github.com/power-devops/perfstat/LICENSE | 23 + .../power-devops/perfstat/c_helpers.c | 159 +++ .../power-devops/perfstat/c_helpers.h | 58 + .../power-devops/perfstat/config.go | 
18 + .../power-devops/perfstat/cpustat.go | 98 ++ .../power-devops/perfstat/diskstat.go | 137 ++ .../github.com/power-devops/perfstat/doc.go | 315 +++++ .../power-devops/perfstat/fsstat.go | 31 + .../power-devops/perfstat/helpers.go | 764 +++++++++++ .../power-devops/perfstat/lparstat.go | 26 + .../power-devops/perfstat/lvmstat.go | 72 + .../power-devops/perfstat/memstat.go | 84 ++ .../power-devops/perfstat/netstat.go | 117 ++ .../power-devops/perfstat/procstat.go | 75 ++ .../power-devops/perfstat/sysconf.go | 195 +++ .../power-devops/perfstat/systemcfg.go | 635 +++++++++ .../power-devops/perfstat/types_cpu.go | 186 +++ .../power-devops/perfstat/types_disk.go | 176 +++ .../power-devops/perfstat/types_fs.go | 195 +++ .../power-devops/perfstat/types_lpar.go | 68 + .../power-devops/perfstat/types_lvm.go | 31 + .../power-devops/perfstat/types_memory.go | 101 ++ .../power-devops/perfstat/types_network.go | 163 +++ .../power-devops/perfstat/types_process.go | 43 + .../power-devops/perfstat/uptime.go | 35 + vendor/github.com/shirou/gopsutil/v4/LICENSE | 61 + .../shirou/gopsutil/v4/common/env.go | 24 + .../github.com/shirou/gopsutil/v4/cpu/cpu.go | 201 +++ .../shirou/gopsutil/v4/cpu/cpu_aix.go | 16 + .../shirou/gopsutil/v4/cpu/cpu_aix_cgo.go | 66 + .../shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go | 92 ++ .../shirou/gopsutil/v4/cpu/cpu_darwin.go | 117 ++ .../shirou/gopsutil/v4/cpu/cpu_darwin_cgo.go | 111 ++ .../gopsutil/v4/cpu/cpu_darwin_nocgo.go | 14 + .../shirou/gopsutil/v4/cpu/cpu_dragonfly.go | 157 +++ .../gopsutil/v4/cpu/cpu_dragonfly_amd64.go | 10 + .../shirou/gopsutil/v4/cpu/cpu_fallback.go | 31 + .../shirou/gopsutil/v4/cpu/cpu_freebsd.go | 169 +++ .../shirou/gopsutil/v4/cpu/cpu_freebsd_386.go | 10 + .../gopsutil/v4/cpu/cpu_freebsd_amd64.go | 10 + .../shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go | 10 + .../gopsutil/v4/cpu/cpu_freebsd_arm64.go | 10 + .../shirou/gopsutil/v4/cpu/cpu_linux.go | 479 +++++++ .../shirou/gopsutil/v4/cpu/cpu_netbsd.go | 119 ++ .../gopsutil/v4/cpu/cpu_netbsd_amd64.go | 10 + .../gopsutil/v4/cpu/cpu_netbsd_arm64.go | 10 + .../shirou/gopsutil/v4/cpu/cpu_openbsd.go | 137 ++ .../shirou/gopsutil/v4/cpu/cpu_openbsd_386.go | 11 + .../gopsutil/v4/cpu/cpu_openbsd_amd64.go | 11 + .../shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go | 11 + .../gopsutil/v4/cpu/cpu_openbsd_arm64.go | 11 + .../gopsutil/v4/cpu/cpu_openbsd_riscv64.go | 11 + .../shirou/gopsutil/v4/cpu/cpu_plan9.go | 50 + .../shirou/gopsutil/v4/cpu/cpu_solaris.go | 270 ++++ .../shirou/gopsutil/v4/cpu/cpu_windows.go | 227 ++++ .../gopsutil/v4/internal/common/binary.go | 638 +++++++++ .../gopsutil/v4/internal/common/common.go | 465 +++++++ .../v4/internal/common/common_darwin.go | 66 + .../v4/internal/common/common_freebsd.go | 82 ++ .../v4/internal/common/common_linux.go | 353 +++++ .../v4/internal/common/common_netbsd.go | 66 + .../v4/internal/common/common_openbsd.go | 66 + .../v4/internal/common/common_unix.go | 62 + .../v4/internal/common/common_windows.go | 304 +++++ .../gopsutil/v4/internal/common/endian.go | 11 + .../gopsutil/v4/internal/common/sleep.go | 22 + .../gopsutil/v4/internal/common/warnings.go | 31 + .../shirou/gopsutil/v4/mem/ex_linux.go | 40 + .../shirou/gopsutil/v4/mem/ex_windows.go | 39 + .../github.com/shirou/gopsutil/v4/mem/mem.go | 121 ++ .../shirou/gopsutil/v4/mem/mem_aix.go | 16 + .../shirou/gopsutil/v4/mem/mem_aix_cgo.go | 51 + .../shirou/gopsutil/v4/mem/mem_aix_nocgo.go | 78 ++ .../shirou/gopsutil/v4/mem/mem_bsd.go | 87 ++ .../shirou/gopsutil/v4/mem/mem_darwin.go | 72 + 
.../shirou/gopsutil/v4/mem/mem_darwin_cgo.go | 58 + .../gopsutil/v4/mem/mem_darwin_nocgo.go | 89 ++ .../shirou/gopsutil/v4/mem/mem_fallback.go | 34 + .../shirou/gopsutil/v4/mem/mem_freebsd.go | 167 +++ .../shirou/gopsutil/v4/mem/mem_linux.go | 506 +++++++ .../shirou/gopsutil/v4/mem/mem_netbsd.go | 87 ++ .../shirou/gopsutil/v4/mem/mem_openbsd.go | 100 ++ .../shirou/gopsutil/v4/mem/mem_openbsd_386.go | 38 + .../gopsutil/v4/mem/mem_openbsd_amd64.go | 33 + .../shirou/gopsutil/v4/mem/mem_openbsd_arm.go | 38 + .../gopsutil/v4/mem/mem_openbsd_arm64.go | 38 + .../gopsutil/v4/mem/mem_openbsd_riscv64.go | 38 + .../shirou/gopsutil/v4/mem/mem_plan9.go | 68 + .../shirou/gopsutil/v4/mem/mem_solaris.go | 213 +++ .../shirou/gopsutil/v4/mem/mem_windows.go | 166 +++ .../github.com/shirou/gopsutil/v4/net/net.go | 274 ++++ .../shirou/gopsutil/v4/net/net_aix.go | 330 +++++ .../shirou/gopsutil/v4/net/net_aix_cgo.go | 36 + .../shirou/gopsutil/v4/net/net_aix_nocgo.go | 95 ++ .../shirou/gopsutil/v4/net/net_darwin.go | 291 ++++ .../shirou/gopsutil/v4/net/net_fallback.go | 93 ++ .../shirou/gopsutil/v4/net/net_freebsd.go | 128 ++ .../shirou/gopsutil/v4/net/net_linux.go | 910 +++++++++++++ .../shirou/gopsutil/v4/net/net_openbsd.go | 320 +++++ .../shirou/gopsutil/v4/net/net_solaris.go | 144 ++ .../shirou/gopsutil/v4/net/net_unix.go | 224 ++++ .../shirou/gopsutil/v4/net/net_windows.go | 779 +++++++++++ .../shirou/gopsutil/v4/process/process.go | 628 +++++++++ .../shirou/gopsutil/v4/process/process_bsd.go | 76 ++ .../gopsutil/v4/process/process_darwin.go | 325 +++++ .../v4/process/process_darwin_amd64.go | 237 ++++ .../v4/process/process_darwin_arm64.go | 213 +++ .../gopsutil/v4/process/process_darwin_cgo.go | 222 +++ .../v4/process/process_darwin_nocgo.go | 127 ++ .../gopsutil/v4/process/process_fallback.go | 203 +++ .../gopsutil/v4/process/process_freebsd.go | 342 +++++ .../v4/process/process_freebsd_386.go | 193 +++ .../v4/process/process_freebsd_amd64.go | 193 +++ .../v4/process/process_freebsd_arm.go | 193 +++ .../v4/process/process_freebsd_arm64.go | 202 +++ .../gopsutil/v4/process/process_linux.go | 1187 +++++++++++++++++ .../gopsutil/v4/process/process_openbsd.go | 387 ++++++ .../v4/process/process_openbsd_386.go | 202 +++ .../v4/process/process_openbsd_amd64.go | 201 +++ .../v4/process/process_openbsd_arm.go | 202 +++ .../v4/process/process_openbsd_arm64.go | 203 +++ .../v4/process/process_openbsd_riscv64.go | 204 +++ .../gopsutil/v4/process/process_plan9.go | 203 +++ .../gopsutil/v4/process/process_posix.go | 185 +++ .../gopsutil/v4/process/process_solaris.go | 304 +++++ .../gopsutil/v4/process/process_windows.go | 1165 ++++++++++++++++ .../v4/process/process_windows_32bit.go | 108 ++ .../v4/process/process_windows_64bit.go | 79 ++ .../shoenig/go-m1cpu/.golangci.yaml | 12 + vendor/github.com/shoenig/go-m1cpu/LICENSE | 363 +++++ vendor/github.com/shoenig/go-m1cpu/Makefile | 12 + vendor/github.com/shoenig/go-m1cpu/README.md | 66 + vendor/github.com/shoenig/go-m1cpu/cpu.go | 213 +++ .../shoenig/go-m1cpu/incompatible.go | 53 + .../tklauser/go-sysconf/.cirrus.yml | 23 + .../github.com/tklauser/go-sysconf/.gitignore | 1 + vendor/github.com/tklauser/go-sysconf/LICENSE | 29 + .../github.com/tklauser/go-sysconf/README.md | 46 + .../github.com/tklauser/go-sysconf/sysconf.go | 21 + .../tklauser/go-sysconf/sysconf_bsd.go | 38 + .../tklauser/go-sysconf/sysconf_darwin.go | 296 ++++ .../tklauser/go-sysconf/sysconf_dragonfly.go | 220 +++ .../tklauser/go-sysconf/sysconf_freebsd.go | 226 ++++ 
.../tklauser/go-sysconf/sysconf_generic.go | 46 + .../tklauser/go-sysconf/sysconf_linux.go | 345 +++++ .../tklauser/go-sysconf/sysconf_netbsd.go | 250 ++++ .../tklauser/go-sysconf/sysconf_openbsd.go | 271 ++++ .../tklauser/go-sysconf/sysconf_posix.go | 83 ++ .../tklauser/go-sysconf/sysconf_solaris.go | 14 + .../go-sysconf/sysconf_unsupported.go | 17 + .../go-sysconf/zsysconf_defs_darwin.go | 254 ++++ .../go-sysconf/zsysconf_defs_dragonfly.go | 228 ++++ .../go-sysconf/zsysconf_defs_freebsd.go | 229 ++++ .../go-sysconf/zsysconf_defs_linux.go | 147 ++ .../go-sysconf/zsysconf_defs_netbsd.go | 164 +++ .../go-sysconf/zsysconf_defs_openbsd.go | 263 ++++ .../go-sysconf/zsysconf_defs_solaris.go | 139 ++ .../go-sysconf/zsysconf_values_freebsd_386.go | 12 + .../zsysconf_values_freebsd_amd64.go | 12 + .../go-sysconf/zsysconf_values_freebsd_arm.go | 12 + .../zsysconf_values_freebsd_arm64.go | 12 + .../zsysconf_values_freebsd_riscv64.go | 12 + .../go-sysconf/zsysconf_values_linux_386.go | 114 ++ .../go-sysconf/zsysconf_values_linux_amd64.go | 114 ++ .../go-sysconf/zsysconf_values_linux_arm.go | 114 ++ .../go-sysconf/zsysconf_values_linux_arm64.go | 114 ++ .../zsysconf_values_linux_loong64.go | 114 ++ .../go-sysconf/zsysconf_values_linux_mips.go | 114 ++ .../zsysconf_values_linux_mips64.go | 114 ++ .../zsysconf_values_linux_mips64le.go | 114 ++ .../zsysconf_values_linux_mipsle.go | 114 ++ .../go-sysconf/zsysconf_values_linux_ppc64.go | 114 ++ .../zsysconf_values_linux_ppc64le.go | 114 ++ .../zsysconf_values_linux_riscv64.go | 114 ++ .../go-sysconf/zsysconf_values_linux_s390x.go | 114 ++ .../go-sysconf/zsysconf_values_netbsd_386.go | 11 + .../zsysconf_values_netbsd_amd64.go | 11 + .../go-sysconf/zsysconf_values_netbsd_arm.go | 11 + .../zsysconf_values_netbsd_arm64.go | 11 + .../github.com/tklauser/numcpus/.cirrus.yml | 13 + vendor/github.com/tklauser/numcpus/LICENSE | 202 +++ vendor/github.com/tklauser/numcpus/README.md | 52 + vendor/github.com/tklauser/numcpus/numcpus.go | 75 ++ .../tklauser/numcpus/numcpus_bsd.go | 66 + .../tklauser/numcpus/numcpus_linux.go | 120 ++ .../tklauser/numcpus/numcpus_solaris.go | 56 + .../tklauser/numcpus/numcpus_unsupported.go | 42 + .../tklauser/numcpus/numcpus_windows.go | 41 + vendor/github.com/yusufpapurcu/wmi/LICENSE | 20 + vendor/github.com/yusufpapurcu/wmi/README.md | 6 + .../yusufpapurcu/wmi/swbemservices.go | 261 ++++ vendor/github.com/yusufpapurcu/wmi/wmi.go | 603 +++++++++ vendor/modules.txt | 30 + 271 files changed, 34246 insertions(+), 2 deletions(-) create mode 100644 vendor/github.com/go-ole/go-ole/.travis.yml create mode 100644 vendor/github.com/go-ole/go-ole/ChangeLog.md create mode 100644 vendor/github.com/go-ole/go-ole/LICENSE create mode 100644 vendor/github.com/go-ole/go-ole/README.md create mode 100644 vendor/github.com/go-ole/go-ole/appveyor.yml create mode 100644 vendor/github.com/go-ole/go-ole/com.go create mode 100644 vendor/github.com/go-ole/go-ole/com_func.go create mode 100644 vendor/github.com/go-ole/go-ole/connect.go create mode 100644 vendor/github.com/go-ole/go-ole/constants.go create mode 100644 vendor/github.com/go-ole/go-ole/error.go create mode 100644 vendor/github.com/go-ole/go-ole/error_func.go create mode 100644 vendor/github.com/go-ole/go-ole/error_windows.go create mode 100644 vendor/github.com/go-ole/go-ole/guid.go create mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpoint.go create mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go create mode 100644 
vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go create mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go create mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go create mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go create mode 100644 vendor/github.com/go-ole/go-ole/idispatch.go create mode 100644 vendor/github.com/go-ole/go-ole/idispatch_func.go create mode 100644 vendor/github.com/go-ole/go-ole/idispatch_windows.go create mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant.go create mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant_func.go create mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant_windows.go create mode 100644 vendor/github.com/go-ole/go-ole/iinspectable.go create mode 100644 vendor/github.com/go-ole/go-ole/iinspectable_func.go create mode 100644 vendor/github.com/go-ole/go-ole/iinspectable_windows.go create mode 100644 vendor/github.com/go-ole/go-ole/iprovideclassinfo.go create mode 100644 vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go create mode 100644 vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go create mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo.go create mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo_func.go create mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo_windows.go create mode 100644 vendor/github.com/go-ole/go-ole/iunknown.go create mode 100644 vendor/github.com/go-ole/go-ole/iunknown_func.go create mode 100644 vendor/github.com/go-ole/go-ole/iunknown_windows.go create mode 100644 vendor/github.com/go-ole/go-ole/ole.go create mode 100644 vendor/github.com/go-ole/go-ole/oleutil/connection.go create mode 100644 vendor/github.com/go-ole/go-ole/oleutil/connection_func.go create mode 100644 vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go create mode 100644 vendor/github.com/go-ole/go-ole/oleutil/go-get.go create mode 100644 vendor/github.com/go-ole/go-ole/oleutil/oleutil.go create mode 100644 vendor/github.com/go-ole/go-ole/safearray.go create mode 100644 vendor/github.com/go-ole/go-ole/safearray_func.go create mode 100644 vendor/github.com/go-ole/go-ole/safearray_windows.go create mode 100644 vendor/github.com/go-ole/go-ole/safearrayconversion.go create mode 100644 vendor/github.com/go-ole/go-ole/safearrayslices.go create mode 100644 vendor/github.com/go-ole/go-ole/utility.go create mode 100644 vendor/github.com/go-ole/go-ole/variables.go create mode 100644 vendor/github.com/go-ole/go-ole/variant.go create mode 100644 vendor/github.com/go-ole/go-ole/variant_386.go create mode 100644 vendor/github.com/go-ole/go-ole/variant_amd64.go create mode 100644 vendor/github.com/go-ole/go-ole/variant_arm.go create mode 100644 vendor/github.com/go-ole/go-ole/variant_arm64.go create mode 100644 vendor/github.com/go-ole/go-ole/variant_date_386.go create mode 100644 vendor/github.com/go-ole/go-ole/variant_date_amd64.go create mode 100644 vendor/github.com/go-ole/go-ole/variant_date_arm.go create mode 100644 vendor/github.com/go-ole/go-ole/variant_date_arm64.go create mode 100644 vendor/github.com/go-ole/go-ole/variant_ppc64le.go create mode 100644 vendor/github.com/go-ole/go-ole/variant_s390x.go create mode 100644 vendor/github.com/go-ole/go-ole/vt_string.go create mode 100644 vendor/github.com/go-ole/go-ole/winrt.go create mode 100644 vendor/github.com/go-ole/go-ole/winrt_doc.go create mode 100644 vendor/github.com/lufia/plan9stats/.gitignore create mode 100644 
vendor/github.com/lufia/plan9stats/LICENSE create mode 100644 vendor/github.com/lufia/plan9stats/README.md create mode 100644 vendor/github.com/lufia/plan9stats/cpu.go create mode 100644 vendor/github.com/lufia/plan9stats/doc.go create mode 100644 vendor/github.com/lufia/plan9stats/host.go create mode 100644 vendor/github.com/lufia/plan9stats/int.go create mode 100644 vendor/github.com/lufia/plan9stats/opts.go create mode 100644 vendor/github.com/lufia/plan9stats/stats.go create mode 100644 vendor/github.com/power-devops/perfstat/LICENSE create mode 100644 vendor/github.com/power-devops/perfstat/c_helpers.c create mode 100644 vendor/github.com/power-devops/perfstat/c_helpers.h create mode 100644 vendor/github.com/power-devops/perfstat/config.go create mode 100644 vendor/github.com/power-devops/perfstat/cpustat.go create mode 100644 vendor/github.com/power-devops/perfstat/diskstat.go create mode 100644 vendor/github.com/power-devops/perfstat/doc.go create mode 100644 vendor/github.com/power-devops/perfstat/fsstat.go create mode 100644 vendor/github.com/power-devops/perfstat/helpers.go create mode 100644 vendor/github.com/power-devops/perfstat/lparstat.go create mode 100644 vendor/github.com/power-devops/perfstat/lvmstat.go create mode 100644 vendor/github.com/power-devops/perfstat/memstat.go create mode 100644 vendor/github.com/power-devops/perfstat/netstat.go create mode 100644 vendor/github.com/power-devops/perfstat/procstat.go create mode 100644 vendor/github.com/power-devops/perfstat/sysconf.go create mode 100644 vendor/github.com/power-devops/perfstat/systemcfg.go create mode 100644 vendor/github.com/power-devops/perfstat/types_cpu.go create mode 100644 vendor/github.com/power-devops/perfstat/types_disk.go create mode 100644 vendor/github.com/power-devops/perfstat/types_fs.go create mode 100644 vendor/github.com/power-devops/perfstat/types_lpar.go create mode 100644 vendor/github.com/power-devops/perfstat/types_lvm.go create mode 100644 vendor/github.com/power-devops/perfstat/types_memory.go create mode 100644 vendor/github.com/power-devops/perfstat/types_network.go create mode 100644 vendor/github.com/power-devops/perfstat/types_process.go create mode 100644 vendor/github.com/power-devops/perfstat/uptime.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/LICENSE create mode 100644 vendor/github.com/shirou/gopsutil/v4/common/env.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_cgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_nocgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go create mode 
100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_cgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_nocgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go create 
mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_aix.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_linux.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_unix.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/net/net_windows.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_darwin_cgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_linux.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_posix.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_windows.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go create mode 100644 vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go create mode 100644 vendor/github.com/shoenig/go-m1cpu/.golangci.yaml create mode 100644 vendor/github.com/shoenig/go-m1cpu/LICENSE create mode 100644 vendor/github.com/shoenig/go-m1cpu/Makefile create mode 100644 
vendor/github.com/shoenig/go-m1cpu/README.md create mode 100644 vendor/github.com/shoenig/go-m1cpu/cpu.go create mode 100644 vendor/github.com/shoenig/go-m1cpu/incompatible.go create mode 100644 vendor/github.com/tklauser/go-sysconf/.cirrus.yml create mode 100644 vendor/github.com/tklauser/go-sysconf/.gitignore create mode 100644 vendor/github.com/tklauser/go-sysconf/LICENSE create mode 100644 vendor/github.com/tklauser/go-sysconf/README.md create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf.go create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_dragonfly.go create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_freebsd.go create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_generic.go create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_linux.go create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_openbsd.go create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_posix.go create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_solaris.go create mode 100644 vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go create mode 100644 
vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go create mode 100644 vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go create mode 100644 vendor/github.com/tklauser/numcpus/.cirrus.yml create mode 100644 vendor/github.com/tklauser/numcpus/LICENSE create mode 100644 vendor/github.com/tklauser/numcpus/README.md create mode 100644 vendor/github.com/tklauser/numcpus/numcpus.go create mode 100644 vendor/github.com/tklauser/numcpus/numcpus_bsd.go create mode 100644 vendor/github.com/tklauser/numcpus/numcpus_linux.go create mode 100644 vendor/github.com/tklauser/numcpus/numcpus_solaris.go create mode 100644 vendor/github.com/tklauser/numcpus/numcpus_unsupported.go create mode 100644 vendor/github.com/tklauser/numcpus/numcpus_windows.go create mode 100644 vendor/github.com/yusufpapurcu/wmi/LICENSE create mode 100644 vendor/github.com/yusufpapurcu/wmi/README.md create mode 100644 vendor/github.com/yusufpapurcu/wmi/swbemservices.go create mode 100644 vendor/github.com/yusufpapurcu/wmi/wmi.go diff --git a/go.mod b/go.mod index f1779c1be01bc..470934fedcafc 100644 --- a/go.mod +++ b/go.mod @@ -134,6 +134,7 @@ require ( github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/common/sigv4 v0.1.0 github.com/richardartoul/molecule v1.0.0 + github.com/shirou/gopsutil/v4 v4.24.0-alpha.1 github.com/thanos-io/objstore v0.0.0-20230829152104-1b257a36f9a3 github.com/willf/bloom v2.0.3+incompatible go.opentelemetry.io/collector/pdata v1.3.0 @@ -147,7 +148,16 @@ require ( k8s.io/utils v0.0.0-20230726121419-3b25d923346b ) -require github.com/dlclark/regexp2 v1.4.0 // indirect +require ( + github.com/dlclark/regexp2 v1.4.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect +) require ( cloud.google.com/go v0.112.0 // indirect diff --git a/go.sum b/go.sum index df44df356fc8f..76288a35335ab 100644 --- a/go.sum +++ b/go.sum @@ -664,6 +664,7 @@ github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -1318,6 +1319,7 @@ github.com/linode/linodego v1.29.0 h1:gDSQWAbKMAQX8db9FDCXHhodQPrJmLcmthjx6m+PyV github.com/linode/linodego v1.29.0/go.mod h1:3k6WvCM10gillgYcnoLqIL23ST27BD9HhMsCJWb3Bpk= github.com/liquidweb/liquidweb-go v1.6.0/go.mod h1:UDcVnAMDkZxpw4Y7NOHkqoeiGacVLEIG/i5J9cyixzQ= github.com/lucas-clemente/quic-go v0.13.1/go.mod h1:Vn3/Fb0/77b02SGhQk36KzOUmXgVpFfizUfW5WMaqyU= 
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1551,6 +1553,7 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I= @@ -1658,6 +1661,12 @@ github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYM github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI= +github.com/shirou/gopsutil/v4 v4.24.0-alpha.1 h1:lLPAdP4TpfgJ5byoc3EFwNSKZj8kCnDFHtuWTktWl0s= +github.com/shirou/gopsutil/v4 v4.24.0-alpha.1/go.mod h1:GVpYUxBee6CTWux2/JslZ7fYPwqkQ8YDJSXmGAryYy4= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= @@ -1748,7 +1757,11 @@ github.com/timewasted/linode v0.0.0-20160829202747-37e84520dcf7/go.mod h1:imsgLp github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 h1:hbyjqt5UnyKeOT3rFVxLxi7iTI6XqR2p4TkwEAQdUiw= @@ -1814,6 +1827,8 @@ github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29Xrm github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.einride.tech/aip v0.66.0 h1:XfV+NQX6L7EOYK11yoHHFtndeaWh3KbD9/cN/6iWEt8= go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1M= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -2240,6 +2255,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/pkg/analytics/reporter.go b/pkg/analytics/reporter.go index d58e727aac7fb..7daa352259f28 100644 --- a/pkg/analytics/reporter.go +++ b/pkg/analytics/reporter.go @@ -7,6 +7,7 @@ import ( "flag" "io" "math" + "os" "time" "github.com/go-kit/log" @@ -20,6 +21,8 @@ import ( "github.com/grafana/loki/v3/pkg/storage/chunk/client" "github.com/grafana/loki/v3/pkg/util/build" + + "github.com/shirou/gopsutil/v4/process" ) const ( @@ -259,6 +262,7 @@ func (rep *Reporter) running(ctx context.Context) error { } return nil } + rep.startCPUPercentCollection(ctx) // check every minute if we should report. ticker := time.NewTicker(reportCheckInterval) defer ticker.Stop() @@ -313,6 +317,39 @@ func (rep *Reporter) reportUsage(ctx context.Context, interval time.Time) error return errs.Err() } +var ( + cpuUsageKey = "cpu_usage" + cpuUsage = NewFloat(cpuUsageKey) + cpuCollectionInterval = time.Minute +) + +func (rep *Reporter) startCPUPercentCollection(ctx context.Context) { + proc, err := process.NewProcess(int32(os.Getpid())) + if err != nil { + level.Debug(rep.logger).Log("msg", "failed to get process", "err", err) + return + } + go func() { + for { + select { + case <-ctx.Done(): + return + default: + percent, err := proc.CPUPercentWithContext(ctx) + if err != nil { + level.Debug(rep.logger).Log("msg", "failed to get cpu percent", "err", err) + } else { + if cpuUsage.Value() < percent { + cpuUsage.Set(percent) + } + } + + } + time.Sleep(cpuCollectionInterval) + } + }() +} + // nextReport compute the next report time based on the interval. // The interval is based off the creation of the cluster seed to avoid all cluster reporting at the same time. 
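[Editor's note: the `startCPUPercentCollection` hunk above samples per-process CPU through gopsutil and keeps only the peak value seen between reports. Below is a minimal standalone sketch of that pattern, assuming only the public gopsutil v4 API; the `pollMaxCPU` helper, the one-second interval, and the printout are illustrative stand-ins, not part of the Loki reporter.]

```go
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/shirou/gopsutil/v4/process"
)

// pollMaxCPU samples this process's CPU usage on a fixed interval and
// remembers the highest value observed, mirroring the "keep the peak,
// report it, then reset" approach used by the analytics reporter above.
func pollMaxCPU(ctx context.Context, interval time.Duration) {
	proc, err := process.NewProcess(int32(os.Getpid()))
	if err != nil {
		return // could not resolve our own process; give up quietly
	}
	var maxPercent float64
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// Ask gopsutil for the process's CPU usage percentage and
			// record it only if it exceeds the peak seen so far.
			p, err := proc.CPUPercentWithContext(ctx)
			if err == nil && p > maxPercent {
				maxPercent = p
				fmt.Printf("new peak cpu usage: %.2f%%\n", maxPercent)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	pollMaxCPU(ctx, time.Second)
}
```

Tracking the maximum rather than the latest sample means a short CPU spike between reporting intervals still shows up in the next usage report, which is also why the `stats.go` hunk further down resets `cpu_usage` to zero after each metrics payload is built.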
func nextReport(interval time.Duration, createdAt, now time.Time) time.Time { diff --git a/pkg/analytics/reporter_test.go b/pkg/analytics/reporter_test.go index a986ac66de05d..140953e70700e 100644 --- a/pkg/analytics/reporter_test.go +++ b/pkg/analytics/reporter_test.go @@ -157,3 +157,17 @@ func TestWrongKV(t *testing.T) { }() require.Equal(t, nil, r.running(ctx)) } + +func TestStartCPUCollection(t *testing.T) { + cpuCollectionInterval = 1 * time.Second + r, err := NewReporter(Config{Leader: true, Enabled: true}, kv.Config{ + Store: "inmemory", + }, nil, log.NewLogfmtLogger(os.Stdout), prometheus.NewPedanticRegistry()) + require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + r.startCPUPercentCollection(ctx) + require.Eventually(t, func() bool { + return cpuUsage.Value() > 0 + }, 5*time.Second, 1*time.Second) +} diff --git a/pkg/analytics/stats.go b/pkg/analytics/stats.go index e4ea068f0cabb..433a4198c4a1b 100644 --- a/pkg/analytics/stats.go +++ b/pkg/analytics/stats.go @@ -136,9 +136,13 @@ func buildMetrics() map[string]interface{} { "memstats": memstats(), "num_cpu": runtime.NumCPU(), "num_goroutine": runtime.NumGoroutine(), + // the highest recorded cpu usage over the interval + "cpu_usage": cpuUsage.Value(), } + // reset cpu usage + cpuUsage.Set(0) expvar.Do(func(kv expvar.KeyValue) { - if !strings.HasPrefix(kv.Key, statsPrefix) || kv.Key == statsPrefix+targetKey || kv.Key == statsPrefix+editionKey { + if !strings.HasPrefix(kv.Key, statsPrefix) || kv.Key == statsPrefix+targetKey || kv.Key == statsPrefix+editionKey || kv.Key == statsPrefix+cpuUsageKey { return } var value interface{} diff --git a/vendor/github.com/go-ole/go-ole/.travis.yml b/vendor/github.com/go-ole/go-ole/.travis.yml new file mode 100644 index 0000000000000..28f740cd5d0a6 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/.travis.yml @@ -0,0 +1,8 @@ +language: go +sudo: false + +go: + - 1.9.x + - 1.10.x + - 1.11.x + - tip diff --git a/vendor/github.com/go-ole/go-ole/ChangeLog.md b/vendor/github.com/go-ole/go-ole/ChangeLog.md new file mode 100644 index 0000000000000..4ba6a8c64d00b --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ChangeLog.md @@ -0,0 +1,49 @@ +# Version 1.x.x + +* **Add more test cases and reference new test COM server project.** (Placeholder for future additions) + +# Version 1.2.0-alphaX + +**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.** + + * Added CI configuration for Travis-CI and AppVeyor. + * Added test InterfaceID and ClassID for the COM Test Server project. + * Added more inline documentation (#83). + * Added IEnumVARIANT implementation (#88). + * Added IEnumVARIANT test cases (#99, #100, #101). + * Added support for retrieving `time.Time` from VARIANT (#92). + * Added test case for IUnknown (#64). + * Added test case for IDispatch (#64). + * Added test cases for scalar variants (#64, #76). + +# Version 1.1.1 + + * Fixes for Linux build. + * Fixes for Windows build. + +# Version 1.1.0 + +The change to provide building on all platforms is a new feature. The increase in minor version reflects that and allows those who wish to stay on 1.0.x to continue to do so. Support for 1.0.x will be limited to bug fixes. + + * Move GUID out of variables.go into its own file to make new documentation available. + * Move OleError out of ole.go into its own file to make new documentation available. + * Add documentation to utility functions. + * Add documentation to variant receiver functions. 
+ * Add documentation to ole structures. + * Make variant available to other systems outside of Windows. + * Make OLE structures available to other systems outside of Windows. + +## New Features + + * Library should now be built on all platforms supported by Go. Library will NOOP on any platform that is not Windows. + * More functions are now documented and available on godoc.org. + +# Version 1.0.1 + + 1. Fix package references from repository location change. + +# Version 1.0.0 + +This version is stable enough for use. The COM API is still incomplete, but provides enough functionality for accessing COM servers using IDispatch interface. + +There is no changelog for this version. Check commits for history. diff --git a/vendor/github.com/go-ole/go-ole/LICENSE b/vendor/github.com/go-ole/go-ole/LICENSE new file mode 100644 index 0000000000000..623ec06f91cac --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright © 2013-2017 Yasuhiro Matsumoto, + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-ole/go-ole/README.md b/vendor/github.com/go-ole/go-ole/README.md new file mode 100644 index 0000000000000..7b577558d1cd6 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/README.md @@ -0,0 +1,46 @@ +# Go OLE + +[![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28) +[![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole) +[![GoDoc](https://godoc.org/github.com/go-ole/go-ole?status.svg)](https://godoc.org/github.com/go-ole/go-ole) + +Go bindings for Windows COM using shared libraries instead of cgo. + +By Yasuhiro Matsumoto. + +## Install + +To experiment with go-ole, you can just compile and run the example program: + +``` +go get github.com/go-ole/go-ole +cd /path/to/go-ole/ +go test + +cd /path/to/go-ole/example/excel +go run excel.go +``` + +## Continuous Integration + +Continuous integration configuration has been added for both Travis-CI and AppVeyor. You will have to add these to your own account for your fork in order for it to run. + +**Travis-CI** + +Travis-CI was added to check builds on Linux to ensure that `go get` works when cross building. Currently, Travis-CI is not used to test cross-building, but this may be changed in the future. 
It is also not currently possible to test the library on Linux, since COM API is specific to Windows and it is not currently possible to run a COM server on Linux or even connect to a remote COM server. + +**AppVeyor** + +AppVeyor is used to build on Windows using the (in-development) test COM server. It is currently only used to test the build and ensure that the code works on Windows. It will be used to register a COM server and then run the test cases based on the test COM server. + +The tests currently do run and do pass and this should be maintained with commits. + +## Versioning + +Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language. Which means that the major version will always maintain backwards compatibility with minor versions. Minor versions will only add new additions and changes. Fixes will always be in patch. + +This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. Leave a ticket, if there is breakage, so that it could be fixed. + +## LICENSE + +Under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml new file mode 100644 index 0000000000000..0d557ac2ff556 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/appveyor.yml @@ -0,0 +1,54 @@ +# Notes: +# - Minimal appveyor.yml file is an empty file. All sections are optional. +# - Indent each level of configuration with 2 spaces. Do not use tabs! +# - All section names are case-sensitive. +# - Section names should be unique on each level. + +version: "1.3.0.{build}-alpha-{branch}" + +os: Windows Server 2012 R2 + +branches: + only: + - master + - v1.2 + - v1.1 + - v1.0 + +skip_tags: true + +clone_folder: c:\gopath\src\github.com\go-ole\go-ole + +environment: + GOPATH: c:\gopath + matrix: + - GOARCH: amd64 + GOVERSION: 1.5 + GOROOT: c:\go + DOWNLOADPLATFORM: "x64" + +install: + - choco install mingw + - SET PATH=c:\tools\mingw64\bin;%PATH% + # - Download COM Server + - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip" + - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL + - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat + # - set + - go version + - go env + - go get -u golang.org/x/tools/cmd/cover + - go get -u golang.org/x/tools/cmd/godoc + - go get -u golang.org/x/tools/cmd/stringer + +build_script: + - cd c:\gopath\src\github.com\go-ole\go-ole + - go get -v -t ./... + - go build + - go test -v -cover ./... 
+ +# disable automatic tests +test: off + +# disable deployment +deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go new file mode 100644 index 0000000000000..a9bef150a3225 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/com.go @@ -0,0 +1,344 @@ +// +build windows + +package ole + +import ( + "syscall" + "unicode/utf16" + "unsafe" +) + +var ( + procCoInitialize = modole32.NewProc("CoInitialize") + procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoUninitialize = modole32.NewProc("CoUninitialize") + procCoCreateInstance = modole32.NewProc("CoCreateInstance") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procCLSIDFromProgID = modole32.NewProc("CLSIDFromProgID") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procStringFromCLSID = modole32.NewProc("StringFromCLSID") + procStringFromIID = modole32.NewProc("StringFromIID") + procIIDFromString = modole32.NewProc("IIDFromString") + procCoGetObject = modole32.NewProc("CoGetObject") + procGetUserDefaultLCID = modkernel32.NewProc("GetUserDefaultLCID") + procCopyMemory = modkernel32.NewProc("RtlMoveMemory") + procVariantInit = modoleaut32.NewProc("VariantInit") + procVariantClear = modoleaut32.NewProc("VariantClear") + procVariantTimeToSystemTime = modoleaut32.NewProc("VariantTimeToSystemTime") + procSysAllocString = modoleaut32.NewProc("SysAllocString") + procSysAllocStringLen = modoleaut32.NewProc("SysAllocStringLen") + procSysFreeString = modoleaut32.NewProc("SysFreeString") + procSysStringLen = modoleaut32.NewProc("SysStringLen") + procCreateDispTypeInfo = modoleaut32.NewProc("CreateDispTypeInfo") + procCreateStdDispatch = modoleaut32.NewProc("CreateStdDispatch") + procGetActiveObject = modoleaut32.NewProc("GetActiveObject") + + procGetMessageW = moduser32.NewProc("GetMessageW") + procDispatchMessageW = moduser32.NewProc("DispatchMessageW") +) + +// coInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). +func coInitialize() (err error) { + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx + // Suggests that no value should be passed to CoInitialized. + // Could just be Call() since the parameter is optional. <-- Needs testing to be sure. + hr, _, _ := procCoInitialize.Call(uintptr(0)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// coInitializeEx initializes COM library with concurrency model. +func coInitializeEx(coinit uint32) (err error) { + // http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx + // Suggests that the first parameter is not only optional but should always be NULL. + hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// CoInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. 
If you are experiencing threading issues, then use +// CoInitializeEx(). +func CoInitialize(p uintptr) (err error) { + // p is ignored and won't be used. + // Avoid any variable not used errors. + p = uintptr(0) + return coInitialize() +} + +// CoInitializeEx initializes COM library with concurrency model. +func CoInitializeEx(p uintptr, coinit uint32) (err error) { + // Avoid any variable not used errors. + p = uintptr(0) + return coInitializeEx(coinit) +} + +// CoUninitialize uninitializes COM Library. +func CoUninitialize() { + procCoUninitialize.Call() +} + +// CoTaskMemFree frees memory pointer. +func CoTaskMemFree(memptr uintptr) { + procCoTaskMemFree.Call(memptr) +} + +// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. +// +// The Programmatic Identifier must be registered, because it will be looked up +// in the Windows Registry. The registry entry has the following keys: CLSID, +// Insertable, Protocol and Shell +// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). +// +// programID identifies the class id with less precision and is not guaranteed +// to be unique. These are usually found in the registry under +// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of +// "Program.Component.Version" with version being optional. +// +// CLSIDFromProgID in Windows API. +func CLSIDFromProgID(progId string) (clsid *GUID, err error) { + var guid GUID + lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) + hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid))) + if hr != 0 { + err = NewError(hr) + } + clsid = &guid + return +} + +// CLSIDFromString retrieves Class ID from string representation. +// +// This is technically the string version of the GUID and will convert the +// string to object. +// +// CLSIDFromString in Windows API. +func CLSIDFromString(str string) (clsid *GUID, err error) { + var guid GUID + lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str))) + hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) + if hr != 0 { + err = NewError(hr) + } + clsid = &guid + return +} + +// StringFromCLSID returns GUID formated string from GUID object. +func StringFromCLSID(clsid *GUID) (str string, err error) { + var p *uint16 + hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p))) + if hr != 0 { + err = NewError(hr) + } + str = LpOleStrToString(p) + return +} + +// IIDFromString returns GUID from program ID. +func IIDFromString(progId string) (clsid *GUID, err error) { + var guid GUID + lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) + hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) + if hr != 0 { + err = NewError(hr) + } + clsid = &guid + return +} + +// StringFromIID returns GUID formatted string from GUID object. +func StringFromIID(iid *GUID) (str string, err error) { + var p *uint16 + hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p))) + if hr != 0 { + err = NewError(hr) + } + str = LpOleStrToString(p) + return +} + +// CreateInstance of single uninitialized object with GUID. 
+func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { + if iid == nil { + iid = IID_IUnknown + } + hr, _, _ := procCoCreateInstance.Call( + uintptr(unsafe.Pointer(clsid)), + 0, + CLSCTX_SERVER, + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&unk))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// GetActiveObject retrieves pointer to active object. +func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { + if iid == nil { + iid = IID_IUnknown + } + hr, _, _ := procGetActiveObject.Call( + uintptr(unsafe.Pointer(clsid)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&unk))) + if hr != 0 { + err = NewError(hr) + } + return +} + +type BindOpts struct { + CbStruct uint32 + GrfFlags uint32 + GrfMode uint32 + TickCountDeadline uint32 +} + +// GetObject retrieves pointer to active object. +func GetObject(programID string, bindOpts *BindOpts, iid *GUID) (unk *IUnknown, err error) { + if bindOpts != nil { + bindOpts.CbStruct = uint32(unsafe.Sizeof(BindOpts{})) + } + if iid == nil { + iid = IID_IUnknown + } + hr, _, _ := procCoGetObject.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(programID))), + uintptr(unsafe.Pointer(bindOpts)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&unk))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// VariantInit initializes variant. +func VariantInit(v *VARIANT) (err error) { + hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// VariantClear clears value in Variant settings to VT_EMPTY. +func VariantClear(v *VARIANT) (err error) { + hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// SysAllocString allocates memory for string and copies string into memory. +func SysAllocString(v string) (ss *int16) { + pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v)))) + ss = (*int16)(unsafe.Pointer(pss)) + return +} + +// SysAllocStringLen copies up to length of given string returning pointer. +func SysAllocStringLen(v string) (ss *int16) { + utf16 := utf16.Encode([]rune(v + "\x00")) + ptr := &utf16[0] + + pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1)) + ss = (*int16)(unsafe.Pointer(pss)) + return +} + +// SysFreeString frees string system memory. This must be called with SysAllocString. +func SysFreeString(v *int16) (err error) { + hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// SysStringLen is the length of the system allocated string. +func SysStringLen(v *int16) uint32 { + l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v))) + return uint32(l) +} + +// CreateStdDispatch provides default IDispatch implementation for IUnknown. +// +// This handles default IDispatch implementation for objects. It haves a few +// limitations with only supporting one language. It will also only return +// default exception codes. +func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) { + hr, _, _ := procCreateStdDispatch.Call( + uintptr(unsafe.Pointer(unk)), + v, + uintptr(unsafe.Pointer(ptinfo)), + uintptr(unsafe.Pointer(&disp))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch. +// +// This will not handle the full implementation of the interface. 
+func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) { + hr, _, _ := procCreateDispTypeInfo.Call( + uintptr(unsafe.Pointer(idata)), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&pptinfo))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// copyMemory moves location of a block of memory. +func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) { + procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length)) +} + +// GetUserDefaultLCID retrieves current user default locale. +func GetUserDefaultLCID() (lcid uint32) { + ret, _, _ := procGetUserDefaultLCID.Call() + lcid = uint32(ret) + return +} + +// GetMessage in message queue from runtime. +// +// This function appears to block. PeekMessage does not block. +func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) { + r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax)) + ret = int32(r0) + return +} + +// DispatchMessage to window procedure. +func DispatchMessage(msg *Msg) (ret int32) { + r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg))) + ret = int32(r0) + return +} diff --git a/vendor/github.com/go-ole/go-ole/com_func.go b/vendor/github.com/go-ole/go-ole/com_func.go new file mode 100644 index 0000000000000..cef539d9ddd6a --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/com_func.go @@ -0,0 +1,174 @@ +// +build !windows + +package ole + +import ( + "time" + "unsafe" +) + +// coInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). +func coInitialize() error { + return NewError(E_NOTIMPL) +} + +// coInitializeEx initializes COM library with concurrency model. +func coInitializeEx(coinit uint32) error { + return NewError(E_NOTIMPL) +} + +// CoInitialize initializes COM library on current thread. +// +// MSDN documentation suggests that this function should not be called. Call +// CoInitializeEx() instead. The reason has to do with threading and this +// function is only for single-threaded apartments. +// +// That said, most users of the library have gotten away with just this +// function. If you are experiencing threading issues, then use +// CoInitializeEx(). +func CoInitialize(p uintptr) error { + return NewError(E_NOTIMPL) +} + +// CoInitializeEx initializes COM library with concurrency model. +func CoInitializeEx(p uintptr, coinit uint32) error { + return NewError(E_NOTIMPL) +} + +// CoUninitialize uninitializes COM Library. +func CoUninitialize() {} + +// CoTaskMemFree frees memory pointer. +func CoTaskMemFree(memptr uintptr) {} + +// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. +// +// The Programmatic Identifier must be registered, because it will be looked up +// in the Windows Registry. The registry entry has the following keys: CLSID, +// Insertable, Protocol and Shell +// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). +// +// programID identifies the class id with less precision and is not guaranteed +// to be unique. 
These are usually found in the registry under +// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of +// "Program.Component.Version" with version being optional. +// +// CLSIDFromProgID in Windows API. +func CLSIDFromProgID(progId string) (*GUID, error) { + return nil, NewError(E_NOTIMPL) +} + +// CLSIDFromString retrieves Class ID from string representation. +// +// This is technically the string version of the GUID and will convert the +// string to object. +// +// CLSIDFromString in Windows API. +func CLSIDFromString(str string) (*GUID, error) { + return nil, NewError(E_NOTIMPL) +} + +// StringFromCLSID returns GUID formated string from GUID object. +func StringFromCLSID(clsid *GUID) (string, error) { + return "", NewError(E_NOTIMPL) +} + +// IIDFromString returns GUID from program ID. +func IIDFromString(progId string) (*GUID, error) { + return nil, NewError(E_NOTIMPL) +} + +// StringFromIID returns GUID formatted string from GUID object. +func StringFromIID(iid *GUID) (string, error) { + return "", NewError(E_NOTIMPL) +} + +// CreateInstance of single uninitialized object with GUID. +func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) { + return nil, NewError(E_NOTIMPL) +} + +// GetActiveObject retrieves pointer to active object. +func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) { + return nil, NewError(E_NOTIMPL) +} + +// VariantInit initializes variant. +func VariantInit(v *VARIANT) error { + return NewError(E_NOTIMPL) +} + +// VariantClear clears value in Variant settings to VT_EMPTY. +func VariantClear(v *VARIANT) error { + return NewError(E_NOTIMPL) +} + +// SysAllocString allocates memory for string and copies string into memory. +func SysAllocString(v string) *int16 { + u := int16(0) + return &u +} + +// SysAllocStringLen copies up to length of given string returning pointer. +func SysAllocStringLen(v string) *int16 { + u := int16(0) + return &u +} + +// SysFreeString frees string system memory. This must be called with SysAllocString. +func SysFreeString(v *int16) error { + return NewError(E_NOTIMPL) +} + +// SysStringLen is the length of the system allocated string. +func SysStringLen(v *int16) uint32 { + return uint32(0) +} + +// CreateStdDispatch provides default IDispatch implementation for IUnknown. +// +// This handles default IDispatch implementation for objects. It haves a few +// limitations with only supporting one language. It will also only return +// default exception codes. +func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) { + return nil, NewError(E_NOTIMPL) +} + +// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch. +// +// This will not handle the full implementation of the interface. +func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) { + return nil, NewError(E_NOTIMPL) +} + +// copyMemory moves location of a block of memory. +func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {} + +// GetUserDefaultLCID retrieves current user default locale. +func GetUserDefaultLCID() uint32 { + return uint32(0) +} + +// GetMessage in message queue from runtime. +// +// This function appears to block. PeekMessage does not block. +func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) +} + +// DispatchMessage to window procedure. 
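// An illustrative message pump built from GetMessage and DispatchMessage, the
// usual way to keep a thread alive for connection-point callbacks (hwnd 0 and
// zero filters mean "any message for the calling thread"):
//
//	var m Msg
//	for {
//		ret, _ := GetMessage(&m, 0, 0, 0)
//		if ret == 0 { // WM_QUIT
//			break
//		}
//		DispatchMessage(&m)
//	}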
+func DispatchMessage(msg *Msg) int32 { + return int32(0) +} + +func GetVariantDate(value uint64) (time.Time, error) { + return time.Now(), NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/connect.go b/vendor/github.com/go-ole/go-ole/connect.go new file mode 100644 index 0000000000000..b2ac2ec67ac9f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/connect.go @@ -0,0 +1,192 @@ +package ole + +// Connection contains IUnknown for fluent interface interaction. +// +// Deprecated. Use oleutil package instead. +type Connection struct { + Object *IUnknown // Access COM +} + +// Initialize COM. +func (*Connection) Initialize() (err error) { + return coInitialize() +} + +// Uninitialize COM. +func (*Connection) Uninitialize() { + CoUninitialize() +} + +// Create IUnknown object based first on ProgId and then from String. +func (c *Connection) Create(progId string) (err error) { + var clsid *GUID + clsid, err = CLSIDFromProgID(progId) + if err != nil { + clsid, err = CLSIDFromString(progId) + if err != nil { + return + } + } + + unknown, err := CreateInstance(clsid, IID_IUnknown) + if err != nil { + return + } + c.Object = unknown + + return +} + +// Release IUnknown object. +func (c *Connection) Release() { + c.Object.Release() +} + +// Load COM object from list of programIDs or strings. +func (c *Connection) Load(names ...string) (errors []error) { + var tempErrors []error = make([]error, len(names)) + var numErrors int = 0 + for _, name := range names { + err := c.Create(name) + if err != nil { + tempErrors = append(tempErrors, err) + numErrors += 1 + continue + } + break + } + + copy(errors, tempErrors[0:numErrors]) + return +} + +// Dispatch returns Dispatch object. +func (c *Connection) Dispatch() (object *Dispatch, err error) { + dispatch, err := c.Object.QueryInterface(IID_IDispatch) + if err != nil { + return + } + object = &Dispatch{dispatch} + return +} + +// Dispatch stores IDispatch object. +type Dispatch struct { + Object *IDispatch // Dispatch object. +} + +// Call method on IDispatch with parameters. +func (d *Dispatch) Call(method string, params ...interface{}) (result *VARIANT, err error) { + id, err := d.GetId(method) + if err != nil { + return + } + + result, err = d.Invoke(id, DISPATCH_METHOD, params) + return +} + +// MustCall method on IDispatch with parameters. +func (d *Dispatch) MustCall(method string, params ...interface{}) (result *VARIANT) { + id, err := d.GetId(method) + if err != nil { + panic(err) + } + + result, err = d.Invoke(id, DISPATCH_METHOD, params) + if err != nil { + panic(err) + } + + return +} + +// Get property on IDispatch with parameters. +func (d *Dispatch) Get(name string, params ...interface{}) (result *VARIANT, err error) { + id, err := d.GetId(name) + if err != nil { + return + } + result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) + return +} + +// MustGet property on IDispatch with parameters. +func (d *Dispatch) MustGet(name string, params ...interface{}) (result *VARIANT) { + id, err := d.GetId(name) + if err != nil { + panic(err) + } + + result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) + if err != nil { + panic(err) + } + return +} + +// Set property on IDispatch with parameters. +func (d *Dispatch) Set(name string, params ...interface{}) (result *VARIANT, err error) { + id, err := d.GetId(name) + if err != nil { + return + } + result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) + return +} + +// MustSet property on IDispatch with parameters. 
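// A short sketch of the deprecated fluent wrapper defined in this file (the
// ProgID and method name are placeholders; new code should prefer the oleutil
// package, as the type comment on Connection notes):
//
//	conn := &Connection{}
//	if err := conn.Initialize(); err != nil {
//		return err
//	}
//	defer conn.Uninitialize()
//	if err := conn.Create("Some.ProgID"); err != nil {
//		return err
//	}
//	defer conn.Release()
//	disp, err := conn.Dispatch()
//	if err != nil {
//		return err
//	}
//	defer disp.Release()
//	result, err := disp.Call("SomeMethod", "arg")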
+func (d *Dispatch) MustSet(name string, params ...interface{}) (result *VARIANT) { + id, err := d.GetId(name) + if err != nil { + panic(err) + } + + result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) + if err != nil { + panic(err) + } + return +} + +// GetId retrieves ID of name on IDispatch. +func (d *Dispatch) GetId(name string) (id int32, err error) { + var dispid []int32 + dispid, err = d.Object.GetIDsOfName([]string{name}) + if err != nil { + return + } + id = dispid[0] + return +} + +// GetIds retrieves all IDs of names on IDispatch. +func (d *Dispatch) GetIds(names ...string) (dispid []int32, err error) { + dispid, err = d.Object.GetIDsOfName(names) + return +} + +// Invoke IDispatch on DisplayID of dispatch type with parameters. +// +// There have been problems where if send cascading params..., it would error +// out because the parameters would be empty. +func (d *Dispatch) Invoke(id int32, dispatch int16, params []interface{}) (result *VARIANT, err error) { + if len(params) < 1 { + result, err = d.Object.Invoke(id, dispatch) + } else { + result, err = d.Object.Invoke(id, dispatch, params...) + } + return +} + +// Release IDispatch object. +func (d *Dispatch) Release() { + d.Object.Release() +} + +// Connect initializes COM and attempts to load IUnknown based on given names. +func Connect(names ...string) (connection *Connection) { + connection.Initialize() + connection.Load(names...) + return +} diff --git a/vendor/github.com/go-ole/go-ole/constants.go b/vendor/github.com/go-ole/go-ole/constants.go new file mode 100644 index 0000000000000..fd0c6d74b0e9a --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/constants.go @@ -0,0 +1,153 @@ +package ole + +const ( + CLSCTX_INPROC_SERVER = 1 + CLSCTX_INPROC_HANDLER = 2 + CLSCTX_LOCAL_SERVER = 4 + CLSCTX_INPROC_SERVER16 = 8 + CLSCTX_REMOTE_SERVER = 16 + CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER + CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER + CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER +) + +const ( + COINIT_APARTMENTTHREADED = 0x2 + COINIT_MULTITHREADED = 0x0 + COINIT_DISABLE_OLE1DDE = 0x4 + COINIT_SPEED_OVER_MEMORY = 0x8 +) + +const ( + DISPATCH_METHOD = 1 + DISPATCH_PROPERTYGET = 2 + DISPATCH_PROPERTYPUT = 4 + DISPATCH_PROPERTYPUTREF = 8 +) + +const ( + S_OK = 0x00000000 + E_UNEXPECTED = 0x8000FFFF + E_NOTIMPL = 0x80004001 + E_OUTOFMEMORY = 0x8007000E + E_INVALIDARG = 0x80070057 + E_NOINTERFACE = 0x80004002 + E_POINTER = 0x80004003 + E_HANDLE = 0x80070006 + E_ABORT = 0x80004004 + E_FAIL = 0x80004005 + E_ACCESSDENIED = 0x80070005 + E_PENDING = 0x8000000A + + CO_E_CLASSSTRING = 0x800401F3 +) + +const ( + CC_FASTCALL = iota + CC_CDECL + CC_MSCPASCAL + CC_PASCAL = CC_MSCPASCAL + CC_MACPASCAL + CC_STDCALL + CC_FPFASTCALL + CC_SYSCALL + CC_MPWCDECL + CC_MPWPASCAL + CC_MAX = CC_MPWPASCAL +) + +type VT uint16 + +const ( + VT_EMPTY VT = 0x0 + VT_NULL VT = 0x1 + VT_I2 VT = 0x2 + VT_I4 VT = 0x3 + VT_R4 VT = 0x4 + VT_R8 VT = 0x5 + VT_CY VT = 0x6 + VT_DATE VT = 0x7 + VT_BSTR VT = 0x8 + VT_DISPATCH VT = 0x9 + VT_ERROR VT = 0xa + VT_BOOL VT = 0xb + VT_VARIANT VT = 0xc + VT_UNKNOWN VT = 0xd + VT_DECIMAL VT = 0xe + VT_I1 VT = 0x10 + VT_UI1 VT = 0x11 + VT_UI2 VT = 0x12 + VT_UI4 VT = 0x13 + VT_I8 VT = 0x14 + VT_UI8 VT = 0x15 + VT_INT VT = 0x16 + VT_UINT VT = 0x17 + VT_VOID VT = 0x18 + VT_HRESULT VT = 0x19 + VT_PTR VT = 0x1a + VT_SAFEARRAY VT = 0x1b + VT_CARRAY VT = 0x1c + VT_USERDEFINED VT = 0x1d + VT_LPSTR VT = 0x1e + VT_LPWSTR VT = 0x1f + VT_RECORD VT = 
0x24 + VT_INT_PTR VT = 0x25 + VT_UINT_PTR VT = 0x26 + VT_FILETIME VT = 0x40 + VT_BLOB VT = 0x41 + VT_STREAM VT = 0x42 + VT_STORAGE VT = 0x43 + VT_STREAMED_OBJECT VT = 0x44 + VT_STORED_OBJECT VT = 0x45 + VT_BLOB_OBJECT VT = 0x46 + VT_CF VT = 0x47 + VT_CLSID VT = 0x48 + VT_BSTR_BLOB VT = 0xfff + VT_VECTOR VT = 0x1000 + VT_ARRAY VT = 0x2000 + VT_BYREF VT = 0x4000 + VT_RESERVED VT = 0x8000 + VT_ILLEGAL VT = 0xffff + VT_ILLEGALMASKED VT = 0xfff + VT_TYPEMASK VT = 0xfff +) + +const ( + DISPID_UNKNOWN = -1 + DISPID_VALUE = 0 + DISPID_PROPERTYPUT = -3 + DISPID_NEWENUM = -4 + DISPID_EVALUATE = -5 + DISPID_CONSTRUCTOR = -6 + DISPID_DESTRUCTOR = -7 + DISPID_COLLECT = -8 +) + +const ( + TKIND_ENUM = 1 + TKIND_RECORD = 2 + TKIND_MODULE = 3 + TKIND_INTERFACE = 4 + TKIND_DISPATCH = 5 + TKIND_COCLASS = 6 + TKIND_ALIAS = 7 + TKIND_UNION = 8 + TKIND_MAX = 9 +) + +// Safe Array Feature Flags + +const ( + FADF_AUTO = 0x0001 + FADF_STATIC = 0x0002 + FADF_EMBEDDED = 0x0004 + FADF_FIXEDSIZE = 0x0010 + FADF_RECORD = 0x0020 + FADF_HAVEIID = 0x0040 + FADF_HAVEVARTYPE = 0x0080 + FADF_BSTR = 0x0100 + FADF_UNKNOWN = 0x0200 + FADF_DISPATCH = 0x0400 + FADF_VARIANT = 0x0800 + FADF_RESERVED = 0xF008 +) diff --git a/vendor/github.com/go-ole/go-ole/error.go b/vendor/github.com/go-ole/go-ole/error.go new file mode 100644 index 0000000000000..096b456d3a1fc --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error.go @@ -0,0 +1,51 @@ +package ole + +// OleError stores COM errors. +type OleError struct { + hr uintptr + description string + subError error +} + +// NewError creates new error with HResult. +func NewError(hr uintptr) *OleError { + return &OleError{hr: hr} +} + +// NewErrorWithDescription creates new COM error with HResult and description. +func NewErrorWithDescription(hr uintptr, description string) *OleError { + return &OleError{hr: hr, description: description} +} + +// NewErrorWithSubError creates new COM error with parent error. +func NewErrorWithSubError(hr uintptr, description string, err error) *OleError { + return &OleError{hr: hr, description: description, subError: err} +} + +// Code is the HResult. +func (v *OleError) Code() uintptr { + return uintptr(v.hr) +} + +// String description, either manually set or format message with error code. +func (v *OleError) String() string { + if v.description != "" { + return errstr(int(v.hr)) + " (" + v.description + ")" + } + return errstr(int(v.hr)) +} + +// Error implements error interface. +func (v *OleError) Error() string { + return v.String() +} + +// Description retrieves error summary, if there is one. +func (v *OleError) Description() string { + return v.description +} + +// SubError returns parent error, if there is one. +func (v *OleError) SubError() error { + return v.subError +} diff --git a/vendor/github.com/go-ole/go-ole/error_func.go b/vendor/github.com/go-ole/go-ole/error_func.go new file mode 100644 index 0000000000000..8a2ffaa2724fa --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error_func.go @@ -0,0 +1,8 @@ +// +build !windows + +package ole + +// errstr converts error code to string. +func errstr(errno int) string { + return "" +} diff --git a/vendor/github.com/go-ole/go-ole/error_windows.go b/vendor/github.com/go-ole/go-ole/error_windows.go new file mode 100644 index 0000000000000..d0e8e68595c4d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/error_windows.go @@ -0,0 +1,24 @@ +// +build windows + +package ole + +import ( + "fmt" + "syscall" + "unicode/utf16" +) + +// errstr converts error code to string. 
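// An illustrative use of the OleError type defined in error.go; on Windows the
// message text comes from errstr/FormatMessage below, on other platforms it is
// empty:
//
//	err := NewErrorWithDescription(E_ACCESSDENIED, "opening the COM object")
//	code := err.Code()  // 0x80070005
//	text := err.Error() // system message plus " (opening the COM object)"
//	_, _ = code, text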
+func errstr(errno int) string { + // ask windows for the remaining errors + var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS + b := make([]uint16, 300) + n, err := syscall.FormatMessage(flags, 0, uint32(errno), 0, b, nil) + if err != nil { + return fmt.Sprintf("error %d (FormatMessage failed with: %v)", errno, err) + } + // trim terminating \r and \n + for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { + } + return string(utf16.Decode(b[:n])) +} diff --git a/vendor/github.com/go-ole/go-ole/guid.go b/vendor/github.com/go-ole/go-ole/guid.go new file mode 100644 index 0000000000000..8d20f68fbf4a9 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/guid.go @@ -0,0 +1,284 @@ +package ole + +var ( + // IID_NULL is null Interface ID, used when no other Interface ID is known. + IID_NULL = NewGUID("{00000000-0000-0000-0000-000000000000}") + + // IID_IUnknown is for IUnknown interfaces. + IID_IUnknown = NewGUID("{00000000-0000-0000-C000-000000000046}") + + // IID_IDispatch is for IDispatch interfaces. + IID_IDispatch = NewGUID("{00020400-0000-0000-C000-000000000046}") + + // IID_IEnumVariant is for IEnumVariant interfaces + IID_IEnumVariant = NewGUID("{00020404-0000-0000-C000-000000000046}") + + // IID_IConnectionPointContainer is for IConnectionPointContainer interfaces. + IID_IConnectionPointContainer = NewGUID("{B196B284-BAB4-101A-B69C-00AA00341D07}") + + // IID_IConnectionPoint is for IConnectionPoint interfaces. + IID_IConnectionPoint = NewGUID("{B196B286-BAB4-101A-B69C-00AA00341D07}") + + // IID_IInspectable is for IInspectable interfaces. + IID_IInspectable = NewGUID("{AF86E2E0-B12D-4C6A-9C5A-D7AA65101E90}") + + // IID_IProvideClassInfo is for IProvideClassInfo interfaces. + IID_IProvideClassInfo = NewGUID("{B196B283-BAB4-101A-B69C-00AA00341D07}") +) + +// These are for testing and not part of any library. +var ( + // IID_ICOMTestString is for ICOMTestString interfaces. + // + // {E0133EB4-C36F-469A-9D3D-C66B84BE19ED} + IID_ICOMTestString = NewGUID("{E0133EB4-C36F-469A-9D3D-C66B84BE19ED}") + + // IID_ICOMTestInt8 is for ICOMTestInt8 interfaces. + // + // {BEB06610-EB84-4155-AF58-E2BFF53680B4} + IID_ICOMTestInt8 = NewGUID("{BEB06610-EB84-4155-AF58-E2BFF53680B4}") + + // IID_ICOMTestInt16 is for ICOMTestInt16 interfaces. + // + // {DAA3F9FA-761E-4976-A860-8364CE55F6FC} + IID_ICOMTestInt16 = NewGUID("{DAA3F9FA-761E-4976-A860-8364CE55F6FC}") + + // IID_ICOMTestInt32 is for ICOMTestInt32 interfaces. + // + // {E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0} + IID_ICOMTestInt32 = NewGUID("{E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}") + + // IID_ICOMTestInt64 is for ICOMTestInt64 interfaces. + // + // {8D437CBC-B3ED-485C-BC32-C336432A1623} + IID_ICOMTestInt64 = NewGUID("{8D437CBC-B3ED-485C-BC32-C336432A1623}") + + // IID_ICOMTestFloat is for ICOMTestFloat interfaces. + // + // {BF1ED004-EA02-456A-AA55-2AC8AC6B054C} + IID_ICOMTestFloat = NewGUID("{BF1ED004-EA02-456A-AA55-2AC8AC6B054C}") + + // IID_ICOMTestDouble is for ICOMTestDouble interfaces. + // + // {BF908A81-8687-4E93-999F-D86FAB284BA0} + IID_ICOMTestDouble = NewGUID("{BF908A81-8687-4E93-999F-D86FAB284BA0}") + + // IID_ICOMTestBoolean is for ICOMTestBoolean interfaces. + // + // {D530E7A6-4EE8-40D1-8931-3D63B8605010} + IID_ICOMTestBoolean = NewGUID("{D530E7A6-4EE8-40D1-8931-3D63B8605010}") + + // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces. 
+ // + // {6485B1EF-D780-4834-A4FE-1EBB51746CA3} + IID_ICOMEchoTestObject = NewGUID("{6485B1EF-D780-4834-A4FE-1EBB51746CA3}") + + // IID_ICOMTestTypes is for ICOMTestTypes interfaces. + // + // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0} + IID_ICOMTestTypes = NewGUID("{CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}") + + // CLSID_COMEchoTestObject is for COMEchoTestObject class. + // + // {3C24506A-AE9E-4D50-9157-EF317281F1B0} + CLSID_COMEchoTestObject = NewGUID("{3C24506A-AE9E-4D50-9157-EF317281F1B0}") + + // CLSID_COMTestScalarClass is for COMTestScalarClass class. + // + // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86} + CLSID_COMTestScalarClass = NewGUID("{865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}") +) + +const hextable = "0123456789ABCDEF" +const emptyGUID = "{00000000-0000-0000-0000-000000000000}" + +// GUID is Windows API specific GUID type. +// +// This exists to match Windows GUID type for direct passing for COM. +// Format is in xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxxxxxx. +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +// NewGUID converts the given string into a globally unique identifier that is +// compliant with the Windows API. +// +// The supplied string may be in any of these formats: +// +// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +// XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX +// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} +// +// The conversion of the supplied string is not case-sensitive. +func NewGUID(guid string) *GUID { + d := []byte(guid) + var d1, d2, d3, d4a, d4b []byte + + switch len(d) { + case 38: + if d[0] != '{' || d[37] != '}' { + return nil + } + d = d[1:37] + fallthrough + case 36: + if d[8] != '-' || d[13] != '-' || d[18] != '-' || d[23] != '-' { + return nil + } + d1 = d[0:8] + d2 = d[9:13] + d3 = d[14:18] + d4a = d[19:23] + d4b = d[24:36] + case 32: + d1 = d[0:8] + d2 = d[8:12] + d3 = d[12:16] + d4a = d[16:20] + d4b = d[20:32] + default: + return nil + } + + var g GUID + var ok1, ok2, ok3, ok4 bool + g.Data1, ok1 = decodeHexUint32(d1) + g.Data2, ok2 = decodeHexUint16(d2) + g.Data3, ok3 = decodeHexUint16(d3) + g.Data4, ok4 = decodeHexByte64(d4a, d4b) + if ok1 && ok2 && ok3 && ok4 { + return &g + } + return nil +} + +func decodeHexUint32(src []byte) (value uint32, ok bool) { + var b1, b2, b3, b4 byte + var ok1, ok2, ok3, ok4 bool + b1, ok1 = decodeHexByte(src[0], src[1]) + b2, ok2 = decodeHexByte(src[2], src[3]) + b3, ok3 = decodeHexByte(src[4], src[5]) + b4, ok4 = decodeHexByte(src[6], src[7]) + value = (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4) + ok = ok1 && ok2 && ok3 && ok4 + return +} + +func decodeHexUint16(src []byte) (value uint16, ok bool) { + var b1, b2 byte + var ok1, ok2 bool + b1, ok1 = decodeHexByte(src[0], src[1]) + b2, ok2 = decodeHexByte(src[2], src[3]) + value = (uint16(b1) << 8) | uint16(b2) + ok = ok1 && ok2 + return +} + +func decodeHexByte64(s1 []byte, s2 []byte) (value [8]byte, ok bool) { + var ok1, ok2, ok3, ok4, ok5, ok6, ok7, ok8 bool + value[0], ok1 = decodeHexByte(s1[0], s1[1]) + value[1], ok2 = decodeHexByte(s1[2], s1[3]) + value[2], ok3 = decodeHexByte(s2[0], s2[1]) + value[3], ok4 = decodeHexByte(s2[2], s2[3]) + value[4], ok5 = decodeHexByte(s2[4], s2[5]) + value[5], ok6 = decodeHexByte(s2[6], s2[7]) + value[6], ok7 = decodeHexByte(s2[8], s2[9]) + value[7], ok8 = decodeHexByte(s2[10], s2[11]) + ok = ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8 + return +} + +func decodeHexByte(c1, c2 byte) (value byte, ok bool) { + var n1, n2 byte + var ok1, ok2 bool + n1, ok1 = decodeHexChar(c1) + n2, ok2 = 
decodeHexChar(c2) + value = (n1 << 4) | n2 + ok = ok1 && ok2 + return +} + +func decodeHexChar(c byte) (byte, bool) { + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + + return 0, false +} + +// String converts the GUID to string form. It will adhere to this pattern: +// +// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} +// +// If the GUID is nil, the string representation of an empty GUID is returned: +// +// {00000000-0000-0000-0000-000000000000} +func (guid *GUID) String() string { + if guid == nil { + return emptyGUID + } + + var c [38]byte + c[0] = '{' + putUint32Hex(c[1:9], guid.Data1) + c[9] = '-' + putUint16Hex(c[10:14], guid.Data2) + c[14] = '-' + putUint16Hex(c[15:19], guid.Data3) + c[19] = '-' + putByteHex(c[20:24], guid.Data4[0:2]) + c[24] = '-' + putByteHex(c[25:37], guid.Data4[2:8]) + c[37] = '}' + return string(c[:]) +} + +func putUint32Hex(b []byte, v uint32) { + b[0] = hextable[byte(v>>24)>>4] + b[1] = hextable[byte(v>>24)&0x0f] + b[2] = hextable[byte(v>>16)>>4] + b[3] = hextable[byte(v>>16)&0x0f] + b[4] = hextable[byte(v>>8)>>4] + b[5] = hextable[byte(v>>8)&0x0f] + b[6] = hextable[byte(v)>>4] + b[7] = hextable[byte(v)&0x0f] +} + +func putUint16Hex(b []byte, v uint16) { + b[0] = hextable[byte(v>>8)>>4] + b[1] = hextable[byte(v>>8)&0x0f] + b[2] = hextable[byte(v)>>4] + b[3] = hextable[byte(v)&0x0f] +} + +func putByteHex(dst, src []byte) { + for i := 0; i < len(src); i++ { + dst[i*2] = hextable[src[i]>>4] + dst[i*2+1] = hextable[src[i]&0x0f] + } +} + +// IsEqualGUID compares two GUID. +// +// Not constant time comparison. +func IsEqualGUID(guid1 *GUID, guid2 *GUID) bool { + return guid1.Data1 == guid2.Data1 && + guid1.Data2 == guid2.Data2 && + guid1.Data3 == guid2.Data3 && + guid1.Data4[0] == guid2.Data4[0] && + guid1.Data4[1] == guid2.Data4[1] && + guid1.Data4[2] == guid2.Data4[2] && + guid1.Data4[3] == guid2.Data4[3] && + guid1.Data4[4] == guid2.Data4[4] && + guid1.Data4[5] == guid2.Data4[5] && + guid1.Data4[6] == guid2.Data4[6] && + guid1.Data4[7] == guid2.Data4[7] +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go new file mode 100644 index 0000000000000..9e6c49f41f0a8 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go @@ -0,0 +1,20 @@ +package ole + +import "unsafe" + +type IConnectionPoint struct { + IUnknown +} + +type IConnectionPointVtbl struct { + IUnknownVtbl + GetConnectionInterface uintptr + GetConnectionPointContainer uintptr + Advise uintptr + Unadvise uintptr + EnumConnections uintptr +} + +func (v *IConnectionPoint) VTable() *IConnectionPointVtbl { + return (*IConnectionPointVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go new file mode 100644 index 0000000000000..5414dc3cd3bc3 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go @@ -0,0 +1,21 @@ +// +build !windows + +package ole + +import "unsafe" + +func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { + return int32(0) +} + +func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} + +func (v *IConnectionPoint) Unadvise(cookie uint32) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) { + return NewError(E_NOTIMPL) 
+} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go new file mode 100644 index 0000000000000..32bc183248d95 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go @@ -0,0 +1,43 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { + // XXX: This doesn't look like it does what it's supposed to + return release((*IUnknown)(unsafe.Pointer(v))) +} + +func (v *IConnectionPoint) Advise(unknown *IUnknown) (cookie uint32, err error) { + hr, _, _ := syscall.Syscall( + v.VTable().Advise, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(unknown)), + uintptr(unsafe.Pointer(&cookie))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (v *IConnectionPoint) Unadvise(cookie uint32) (err error) { + hr, _, _ := syscall.Syscall( + v.VTable().Unadvise, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(cookie), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go new file mode 100644 index 0000000000000..165860d199e8c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go @@ -0,0 +1,17 @@ +package ole + +import "unsafe" + +type IConnectionPointContainer struct { + IUnknown +} + +type IConnectionPointContainerVtbl struct { + IUnknownVtbl + EnumConnectionPoints uintptr + FindConnectionPoint uintptr +} + +func (v *IConnectionPointContainer) VTable() *IConnectionPointContainerVtbl { + return (*IConnectionPointContainerVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go new file mode 100644 index 0000000000000..5dfa42aaebb78 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go @@ -0,0 +1,11 @@ +// +build !windows + +package ole + +func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go new file mode 100644 index 0000000000000..ad30d79efc4e6 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { + return NewError(E_NOTIMPL) +} + +func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) (err error) { + hr, _, _ := syscall.Syscall( + v.VTable().FindConnectionPoint, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(point))) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch.go b/vendor/github.com/go-ole/go-ole/idispatch.go new file mode 100644 index 0000000000000..d4af1240925dd --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch.go @@ -0,0 +1,94 @@ +package ole + +import "unsafe" 
+ +type IDispatch struct { + IUnknown +} + +type IDispatchVtbl struct { + IUnknownVtbl + GetTypeInfoCount uintptr + GetTypeInfo uintptr + GetIDsOfNames uintptr + Invoke uintptr +} + +func (v *IDispatch) VTable() *IDispatchVtbl { + return (*IDispatchVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IDispatch) GetIDsOfName(names []string) (dispid []int32, err error) { + dispid, err = getIDsOfName(v, names) + return +} + +func (v *IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { + result, err = invoke(v, dispid, dispatch, params...) + return +} + +func (v *IDispatch) GetTypeInfoCount() (c uint32, err error) { + c, err = getTypeInfoCount(v) + return +} + +func (v *IDispatch) GetTypeInfo() (tinfo *ITypeInfo, err error) { + tinfo, err = getTypeInfo(v) + return +} + +// GetSingleIDOfName is a helper that returns single display ID for IDispatch name. +// +// This replaces the common pattern of attempting to get a single name from the list of available +// IDs. It gives the first ID, if it is available. +func (v *IDispatch) GetSingleIDOfName(name string) (displayID int32, err error) { + var displayIDs []int32 + displayIDs, err = v.GetIDsOfName([]string{name}) + if err != nil { + return + } + displayID = displayIDs[0] + return +} + +// InvokeWithOptionalArgs accepts arguments as an array, works like Invoke. +// +// Accepts name and will attempt to retrieve Display ID to pass to Invoke. +// +// Passing params as an array is a workaround that could be fixed in later versions of Go that +// prevent passing empty params. During testing it was discovered that this is an acceptable way of +// getting around not being able to pass params normally. +func (v *IDispatch) InvokeWithOptionalArgs(name string, dispatch int16, params []interface{}) (result *VARIANT, err error) { + displayID, err := v.GetSingleIDOfName(name) + if err != nil { + return + } + + if len(params) < 1 { + result, err = v.Invoke(displayID, dispatch) + } else { + result, err = v.Invoke(displayID, dispatch, params...) + } + + return +} + +// CallMethod invokes named function with arguments on object. +func (v *IDispatch) CallMethod(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_METHOD, params) +} + +// GetProperty retrieves the property with the name with the ability to pass arguments. +// +// Most of the time you will not need to pass arguments as most objects do not allow for this +// feature. Or at least, should not allow for this feature. Some servers don't follow best practices +// and this is provided for those edge cases. +func (v *IDispatch) GetProperty(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYGET, params) +} + +// PutProperty attempts to mutate a property in the object. 
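// An illustrative round trip through the helpers in this file (the member
// names "Visible" and "Refresh" are placeholders for whatever the COM object
// actually exposes):
//
//	disp, err := unk.QueryInterface(IID_IDispatch)
//	if err != nil {
//		return err
//	}
//	defer disp.Release()
//	if _, err := disp.PutProperty("Visible", true); err != nil {
//		return err
//	}
//	result, err := disp.CallMethod("Refresh")
//	if err != nil {
//		return err
//	}
//	defer VariantClear(result)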
+func (v *IDispatch) PutProperty(name string, params ...interface{}) (*VARIANT, error) { + return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYPUT, params) +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_func.go b/vendor/github.com/go-ole/go-ole/idispatch_func.go new file mode 100644 index 0000000000000..b8fbbe319f1ac --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func getIDsOfName(disp *IDispatch, names []string) ([]int32, error) { + return []int32{}, NewError(E_NOTIMPL) +} + +func getTypeInfoCount(disp *IDispatch) (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} + +func getTypeInfo(disp *IDispatch) (*ITypeInfo, error) { + return nil, NewError(E_NOTIMPL) +} + +func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (*VARIANT, error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go new file mode 100644 index 0000000000000..b399f04791d46 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go @@ -0,0 +1,202 @@ +// +build windows + +package ole + +import ( + "math/big" + "syscall" + "time" + "unsafe" +) + +func getIDsOfName(disp *IDispatch, names []string) (dispid []int32, err error) { + wnames := make([]*uint16, len(names)) + for i := 0; i < len(names); i++ { + wnames[i] = syscall.StringToUTF16Ptr(names[i]) + } + dispid = make([]int32, len(names)) + namelen := uint32(len(names)) + hr, _, _ := syscall.Syscall6( + disp.VTable().GetIDsOfNames, + 6, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(IID_NULL)), + uintptr(unsafe.Pointer(&wnames[0])), + uintptr(namelen), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&dispid[0]))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func getTypeInfoCount(disp *IDispatch) (c uint32, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetTypeInfoCount, + 2, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(&c)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func getTypeInfo(disp *IDispatch) (tinfo *ITypeInfo, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetTypeInfo, + 3, + uintptr(unsafe.Pointer(disp)), + uintptr(GetUserDefaultLCID()), + uintptr(unsafe.Pointer(&tinfo))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { + var dispparams DISPPARAMS + + if dispatch&DISPATCH_PROPERTYPUT != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.cNamedArgs = 1 + } else if dispatch&DISPATCH_PROPERTYPUTREF != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.cNamedArgs = 1 + } + var vargs []VARIANT + if len(params) > 0 { + vargs = make([]VARIANT, len(params)) + for i, v := range params { + //n := len(params)-i-1 + n := len(params) - i - 1 + VariantInit(&vargs[n]) + switch vv := v.(type) { + case bool: + if vv { + vargs[n] = NewVariant(VT_BOOL, 0xffff) + } else { + vargs[n] = NewVariant(VT_BOOL, 0) + } + case *bool: + vargs[n] = NewVariant(VT_BOOL|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*bool))))) + case uint8: + vargs[n] = NewVariant(VT_I1, int64(v.(uint8))) + case *uint8: + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) + case 
int8: + vargs[n] = NewVariant(VT_I1, int64(v.(int8))) + case *int8: + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) + case int16: + vargs[n] = NewVariant(VT_I2, int64(v.(int16))) + case *int16: + vargs[n] = NewVariant(VT_I2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int16))))) + case uint16: + vargs[n] = NewVariant(VT_UI2, int64(v.(uint16))) + case *uint16: + vargs[n] = NewVariant(VT_UI2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint16))))) + case int32: + vargs[n] = NewVariant(VT_I4, int64(v.(int32))) + case *int32: + vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int32))))) + case uint32: + vargs[n] = NewVariant(VT_UI4, int64(v.(uint32))) + case *uint32: + vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint32))))) + case int64: + vargs[n] = NewVariant(VT_I8, int64(v.(int64))) + case *int64: + vargs[n] = NewVariant(VT_I8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int64))))) + case uint64: + vargs[n] = NewVariant(VT_UI8, int64(uintptr(v.(uint64)))) + case *uint64: + vargs[n] = NewVariant(VT_UI8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint64))))) + case int: + vargs[n] = NewVariant(VT_I4, int64(v.(int))) + case *int: + vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int))))) + case uint: + vargs[n] = NewVariant(VT_UI4, int64(v.(uint))) + case *uint: + vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint))))) + case float32: + vargs[n] = NewVariant(VT_R4, *(*int64)(unsafe.Pointer(&vv))) + case *float32: + vargs[n] = NewVariant(VT_R4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float32))))) + case float64: + vargs[n] = NewVariant(VT_R8, *(*int64)(unsafe.Pointer(&vv))) + case *float64: + vargs[n] = NewVariant(VT_R8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float64))))) + case *big.Int: + vargs[n] = NewVariant(VT_DECIMAL, v.(*big.Int).Int64()) + case string: + vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(v.(string)))))) + case *string: + vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*string))))) + case time.Time: + s := vv.Format("2006-01-02 15:04:05") + vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(s))))) + case *time.Time: + s := vv.Format("2006-01-02 15:04:05") + vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(&s)))) + case *IDispatch: + vargs[n] = NewVariant(VT_DISPATCH, int64(uintptr(unsafe.Pointer(v.(*IDispatch))))) + case **IDispatch: + vargs[n] = NewVariant(VT_DISPATCH|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(**IDispatch))))) + case nil: + vargs[n] = NewVariant(VT_NULL, 0) + case *VARIANT: + vargs[n] = NewVariant(VT_VARIANT|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*VARIANT))))) + case []byte: + safeByteArray := safeArrayFromByteSlice(v.([]byte)) + vargs[n] = NewVariant(VT_ARRAY|VT_UI1, int64(uintptr(unsafe.Pointer(safeByteArray)))) + defer VariantClear(&vargs[n]) + case []string: + safeByteArray := safeArrayFromStringSlice(v.([]string)) + vargs[n] = NewVariant(VT_ARRAY|VT_BSTR, int64(uintptr(unsafe.Pointer(safeByteArray)))) + defer VariantClear(&vargs[n]) + default: + panic("unknown type") + } + } + dispparams.rgvarg = uintptr(unsafe.Pointer(&vargs[0])) + dispparams.cArgs = uint32(len(params)) + } + + result = new(VARIANT) + var excepInfo EXCEPINFO + VariantInit(result) + hr, _, _ := syscall.Syscall9( + disp.VTable().Invoke, + 9, + uintptr(unsafe.Pointer(disp)), + uintptr(dispid), + uintptr(unsafe.Pointer(IID_NULL)), + 
uintptr(GetUserDefaultLCID()), + uintptr(dispatch), + uintptr(unsafe.Pointer(&dispparams)), + uintptr(unsafe.Pointer(result)), + uintptr(unsafe.Pointer(&excepInfo)), + 0) + if hr != 0 { + excepInfo.renderStrings() + excepInfo.Clear() + err = NewErrorWithSubError(hr, excepInfo.description, excepInfo) + } + for i, varg := range vargs { + n := len(params) - i - 1 + if varg.VT == VT_BSTR && varg.Val != 0 { + SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val))))) + } + if varg.VT == (VT_BSTR|VT_BYREF) && varg.Val != 0 { + *(params[n].(*string)) = LpOleStrToString(*(**uint16)(unsafe.Pointer(uintptr(varg.Val)))) + } + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant.go b/vendor/github.com/go-ole/go-ole/ienumvariant.go new file mode 100644 index 0000000000000..2433897544300 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant.go @@ -0,0 +1,19 @@ +package ole + +import "unsafe" + +type IEnumVARIANT struct { + IUnknown +} + +type IEnumVARIANTVtbl struct { + IUnknownVtbl + Next uintptr + Skip uintptr + Reset uintptr + Clone uintptr +} + +func (v *IEnumVARIANT) VTable() *IEnumVARIANTVtbl { + return (*IEnumVARIANTVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go new file mode 100644 index 0000000000000..c14848199cb8c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func (enum *IEnumVARIANT) Clone() (*IEnumVARIANT, error) { + return nil, NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Reset() error { + return NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Skip(celt uint) error { + return NewError(E_NOTIMPL) +} + +func (enum *IEnumVARIANT) Next(celt uint) (VARIANT, uint, error) { + return NewVariant(VT_NULL, int64(0)), 0, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go new file mode 100644 index 0000000000000..4781f3b8b0075 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go @@ -0,0 +1,63 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (enum *IEnumVARIANT) Clone() (cloned *IEnumVARIANT, err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Clone, + 2, + uintptr(unsafe.Pointer(enum)), + uintptr(unsafe.Pointer(&cloned)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Reset() (err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Reset, + 1, + uintptr(unsafe.Pointer(enum)), + 0, + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Skip(celt uint) (err error) { + hr, _, _ := syscall.Syscall( + enum.VTable().Skip, + 2, + uintptr(unsafe.Pointer(enum)), + uintptr(celt), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} + +func (enum *IEnumVARIANT) Next(celt uint) (array VARIANT, length uint, err error) { + hr, _, _ := syscall.Syscall6( + enum.VTable().Next, + 4, + uintptr(unsafe.Pointer(enum)), + uintptr(celt), + uintptr(unsafe.Pointer(&array)), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable.go b/vendor/github.com/go-ole/go-ole/iinspectable.go new file mode 100644 index 0000000000000..f4a19e253af7d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable.go @@ -0,0 +1,18 @@ +package ole + +import "unsafe" + +type 
IInspectable struct { + IUnknown +} + +type IInspectableVtbl struct { + IUnknownVtbl + GetIIds uintptr + GetRuntimeClassName uintptr + GetTrustLevel uintptr +} + +func (v *IInspectable) VTable() *IInspectableVtbl { + return (*IInspectableVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_func.go b/vendor/github.com/go-ole/go-ole/iinspectable_func.go new file mode 100644 index 0000000000000..348829bf062fc --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable_func.go @@ -0,0 +1,15 @@ +// +build !windows + +package ole + +func (v *IInspectable) GetIids() ([]*GUID, error) { + return []*GUID{}, NewError(E_NOTIMPL) +} + +func (v *IInspectable) GetRuntimeClassName() (string, error) { + return "", NewError(E_NOTIMPL) +} + +func (v *IInspectable) GetTrustLevel() (uint32, error) { + return uint32(0), NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go new file mode 100644 index 0000000000000..4519a4aa44951 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go @@ -0,0 +1,72 @@ +// +build windows + +package ole + +import ( + "bytes" + "encoding/binary" + "reflect" + "syscall" + "unsafe" +) + +func (v *IInspectable) GetIids() (iids []*GUID, err error) { + var count uint32 + var array uintptr + hr, _, _ := syscall.Syscall( + v.VTable().GetIIds, + 3, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&count)), + uintptr(unsafe.Pointer(&array))) + if hr != 0 { + err = NewError(hr) + return + } + defer CoTaskMemFree(array) + + iids = make([]*GUID, count) + byteCount := count * uint32(unsafe.Sizeof(GUID{})) + slicehdr := reflect.SliceHeader{Data: array, Len: int(byteCount), Cap: int(byteCount)} + byteSlice := *(*[]byte)(unsafe.Pointer(&slicehdr)) + reader := bytes.NewReader(byteSlice) + for i := range iids { + guid := GUID{} + err = binary.Read(reader, binary.LittleEndian, &guid) + if err != nil { + return + } + iids[i] = &guid + } + return +} + +func (v *IInspectable) GetRuntimeClassName() (s string, err error) { + var hstring HString + hr, _, _ := syscall.Syscall( + v.VTable().GetRuntimeClassName, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&hstring)), + 0) + if hr != 0 { + err = NewError(hr) + return + } + s = hstring.String() + DeleteHString(hstring) + return +} + +func (v *IInspectable) GetTrustLevel() (level uint32, err error) { + hr, _, _ := syscall.Syscall( + v.VTable().GetTrustLevel, + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&level)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go new file mode 100644 index 0000000000000..25f3a6f24a919 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go @@ -0,0 +1,21 @@ +package ole + +import "unsafe" + +type IProvideClassInfo struct { + IUnknown +} + +type IProvideClassInfoVtbl struct { + IUnknownVtbl + GetClassInfo uintptr +} + +func (v *IProvideClassInfo) VTable() *IProvideClassInfoVtbl { + return (*IProvideClassInfoVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IProvideClassInfo) GetClassInfo() (cinfo *ITypeInfo, err error) { + cinfo, err = getClassInfo(v) + return +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go new file mode 100644 index 0000000000000..7e3cb63ea7394 --- /dev/null +++ 
b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go @@ -0,0 +1,7 @@ +// +build !windows + +package ole + +func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go new file mode 100644 index 0000000000000..2ad0163949746 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { + hr, _, _ := syscall.Syscall( + disp.VTable().GetClassInfo, + 2, + uintptr(unsafe.Pointer(disp)), + uintptr(unsafe.Pointer(&tinfo)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo.go b/vendor/github.com/go-ole/go-ole/itypeinfo.go new file mode 100644 index 0000000000000..dd3c5e21bbf31 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo.go @@ -0,0 +1,34 @@ +package ole + +import "unsafe" + +type ITypeInfo struct { + IUnknown +} + +type ITypeInfoVtbl struct { + IUnknownVtbl + GetTypeAttr uintptr + GetTypeComp uintptr + GetFuncDesc uintptr + GetVarDesc uintptr + GetNames uintptr + GetRefTypeOfImplType uintptr + GetImplTypeFlags uintptr + GetIDsOfNames uintptr + Invoke uintptr + GetDocumentation uintptr + GetDllEntry uintptr + GetRefTypeInfo uintptr + AddressOfMember uintptr + CreateInstance uintptr + GetMops uintptr + GetContainingTypeLib uintptr + ReleaseTypeAttr uintptr + ReleaseFuncDesc uintptr + ReleaseVarDesc uintptr +} + +func (v *ITypeInfo) VTable() *ITypeInfoVtbl { + return (*ITypeInfoVtbl)(unsafe.Pointer(v.RawVTable)) +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go new file mode 100644 index 0000000000000..8364a659bae1d --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go @@ -0,0 +1,7 @@ +// +build !windows + +package ole + +func (v *ITypeInfo) GetTypeAttr() (*TYPEATTR, error) { + return nil, NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go new file mode 100644 index 0000000000000..54782b3da5dd5 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package ole + +import ( + "syscall" + "unsafe" +) + +func (v *ITypeInfo) GetTypeAttr() (tattr *TYPEATTR, err error) { + hr, _, _ := syscall.Syscall( + uintptr(v.VTable().GetTypeAttr), + 2, + uintptr(unsafe.Pointer(v)), + uintptr(unsafe.Pointer(&tattr)), + 0) + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown.go b/vendor/github.com/go-ole/go-ole/iunknown.go new file mode 100644 index 0000000000000..108f28ea61084 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown.go @@ -0,0 +1,57 @@ +package ole + +import "unsafe" + +type IUnknown struct { + RawVTable *interface{} +} + +type IUnknownVtbl struct { + QueryInterface uintptr + AddRef uintptr + Release uintptr +} + +type UnknownLike interface { + QueryInterface(iid *GUID) (disp *IDispatch, err error) + AddRef() int32 + Release() int32 +} + +func (v *IUnknown) VTable() *IUnknownVtbl { + return (*IUnknownVtbl)(unsafe.Pointer(v.RawVTable)) +} + +func (v *IUnknown) PutQueryInterface(interfaceID *GUID, obj interface{}) error { + return reflectQueryInterface(v, 
v.VTable().QueryInterface, interfaceID, obj) +} + +func (v *IUnknown) IDispatch(interfaceID *GUID) (dispatch *IDispatch, err error) { + err = v.PutQueryInterface(interfaceID, &dispatch) + return +} + +func (v *IUnknown) IEnumVARIANT(interfaceID *GUID) (enum *IEnumVARIANT, err error) { + err = v.PutQueryInterface(interfaceID, &enum) + return +} + +func (v *IUnknown) QueryInterface(iid *GUID) (*IDispatch, error) { + return queryInterface(v, iid) +} + +func (v *IUnknown) MustQueryInterface(iid *GUID) (disp *IDispatch) { + unk, err := queryInterface(v, iid) + if err != nil { + panic(err) + } + return unk +} + +func (v *IUnknown) AddRef() int32 { + return addRef(v) +} + +func (v *IUnknown) Release() int32 { + return release(v) +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_func.go b/vendor/github.com/go-ole/go-ole/iunknown_func.go new file mode 100644 index 0000000000000..d0a62cfd73027 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown_func.go @@ -0,0 +1,19 @@ +// +build !windows + +package ole + +func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { + return NewError(E_NOTIMPL) +} + +func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { + return nil, NewError(E_NOTIMPL) +} + +func addRef(unk *IUnknown) int32 { + return 0 +} + +func release(unk *IUnknown) int32 { + return 0 +} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_windows.go b/vendor/github.com/go-ole/go-ole/iunknown_windows.go new file mode 100644 index 0000000000000..ede5bb8c17322 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/iunknown_windows.go @@ -0,0 +1,58 @@ +// +build windows + +package ole + +import ( + "reflect" + "syscall" + "unsafe" +) + +func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { + selfValue := reflect.ValueOf(self).Elem() + objValue := reflect.ValueOf(obj).Elem() + + hr, _, _ := syscall.Syscall( + method, + 3, + selfValue.UnsafeAddr(), + uintptr(unsafe.Pointer(interfaceID)), + objValue.Addr().Pointer()) + if hr != 0 { + err = NewError(hr) + } + return +} + +func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { + hr, _, _ := syscall.Syscall( + unk.VTable().QueryInterface, + 3, + uintptr(unsafe.Pointer(unk)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&disp))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func addRef(unk *IUnknown) int32 { + ret, _, _ := syscall.Syscall( + unk.VTable().AddRef, + 1, + uintptr(unsafe.Pointer(unk)), + 0, + 0) + return int32(ret) +} + +func release(unk *IUnknown) int32 { + ret, _, _ := syscall.Syscall( + unk.VTable().Release, + 1, + uintptr(unsafe.Pointer(unk)), + 0, + 0) + return int32(ret) +} diff --git a/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/go-ole/go-ole/ole.go new file mode 100644 index 0000000000000..dbd132bbd702f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/ole.go @@ -0,0 +1,190 @@ +package ole + +import ( + "fmt" + "strings" + "unsafe" +) + +// DISPPARAMS are the arguments that passed to methods or property. +type DISPPARAMS struct { + rgvarg uintptr + rgdispidNamedArgs uintptr + cArgs uint32 + cNamedArgs uint32 +} + +// EXCEPINFO defines exception info. +type EXCEPINFO struct { + wCode uint16 + wReserved uint16 + bstrSource *uint16 + bstrDescription *uint16 + bstrHelpFile *uint16 + dwHelpContext uint32 + pvReserved uintptr + pfnDeferredFillIn uintptr + scode uint32 + + // Go-specific part. 
Don't move upper cos it'll break structure layout for native code. + rendered bool + source string + description string + helpFile string +} + +// renderStrings translates BSTR strings to Go ones so `.Error` and `.String` +// could be safely called after `.Clear`. We need this when we can't rely on +// a caller to call `.Clear`. +func (e *EXCEPINFO) renderStrings() { + e.rendered = true + if e.bstrSource == nil { + e.source = "" + } else { + e.source = BstrToString(e.bstrSource) + } + if e.bstrDescription == nil { + e.description = "" + } else { + e.description = BstrToString(e.bstrDescription) + } + if e.bstrHelpFile == nil { + e.helpFile = "" + } else { + e.helpFile = BstrToString(e.bstrHelpFile) + } +} + +// Clear frees BSTR strings inside an EXCEPINFO and set it to NULL. +func (e *EXCEPINFO) Clear() { + freeBSTR := func(s *uint16) { + // SysFreeString don't return errors and is safe for call's on NULL. + // https://docs.microsoft.com/en-us/windows/win32/api/oleauto/nf-oleauto-sysfreestring + _ = SysFreeString((*int16)(unsafe.Pointer(s))) + } + + if e.bstrSource != nil { + freeBSTR(e.bstrSource) + e.bstrSource = nil + } + if e.bstrDescription != nil { + freeBSTR(e.bstrDescription) + e.bstrDescription = nil + } + if e.bstrHelpFile != nil { + freeBSTR(e.bstrHelpFile) + e.bstrHelpFile = nil + } +} + +// WCode return wCode in EXCEPINFO. +func (e EXCEPINFO) WCode() uint16 { + return e.wCode +} + +// SCODE return scode in EXCEPINFO. +func (e EXCEPINFO) SCODE() uint32 { + return e.scode +} + +// String convert EXCEPINFO to string. +func (e EXCEPINFO) String() string { + if !e.rendered { + e.renderStrings() + } + return fmt.Sprintf( + "wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x", + e.wCode, e.source, e.description, e.helpFile, e.dwHelpContext, e.scode, + ) +} + +// Error implements error interface and returns error string. +func (e EXCEPINFO) Error() string { + if !e.rendered { + e.renderStrings() + } + + if e.description != "" { + return strings.TrimSpace(e.description) + } + + code := e.scode + if e.wCode != 0 { + code = uint32(e.wCode) + } + return fmt.Sprintf("%v: %#x", e.source, code) +} + +// PARAMDATA defines parameter data type. +type PARAMDATA struct { + Name *int16 + Vt uint16 +} + +// METHODDATA defines method info. +type METHODDATA struct { + Name *uint16 + Data *PARAMDATA + Dispid int32 + Meth uint32 + CC int32 + CArgs uint32 + Flags uint16 + VtReturn uint32 +} + +// INTERFACEDATA defines interface info. +type INTERFACEDATA struct { + MethodData *METHODDATA + CMembers uint32 +} + +// Point is 2D vector type. +type Point struct { + X int32 + Y int32 +} + +// Msg is message between processes. +type Msg struct { + Hwnd uint32 + Message uint32 + Wparam int32 + Lparam int32 + Time uint32 + Pt Point +} + +// TYPEDESC defines data type. +type TYPEDESC struct { + Hreftype uint32 + VT uint16 +} + +// IDLDESC defines IDL info. +type IDLDESC struct { + DwReserved uint32 + WIDLFlags uint16 +} + +// TYPEATTR defines type info. 
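// A minimal sketch of how a TYPEATTR is normally obtained from an IDispatch:
// GetTypeInfo yields the ITypeInfo, whose GetTypeAttr fills in the struct (the
// ITypeInfo vtable also carries a ReleaseTypeAttr slot for freeing it):
//
//	tinfo, err := disp.GetTypeInfo()
//	if err != nil {
//		return err
//	}
//	tattr, err := tinfo.GetTypeAttr()
//	if err != nil {
//		return err
//	}
//	_ = tattr.Guid // interface GUID, member counts, vtable size, and so on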
+type TYPEATTR struct { + Guid GUID + Lcid uint32 + dwReserved uint32 + MemidConstructor int32 + MemidDestructor int32 + LpstrSchema *uint16 + CbSizeInstance uint32 + Typekind int32 + CFuncs uint16 + CVars uint16 + CImplTypes uint16 + CbSizeVft uint16 + CbAlignment uint16 + WTypeFlags uint16 + WMajorVerNum uint16 + WMinorVerNum uint16 + TdescAlias TYPEDESC + IdldescType IDLDESC +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection.go b/vendor/github.com/go-ole/go-ole/oleutil/connection.go new file mode 100644 index 0000000000000..60df73cda0014 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection.go @@ -0,0 +1,100 @@ +// +build windows + +package oleutil + +import ( + "reflect" + "unsafe" + + ole "github.com/go-ole/go-ole" +) + +type stdDispatch struct { + lpVtbl *stdDispatchVtbl + ref int32 + iid *ole.GUID + iface interface{} + funcMap map[string]int32 +} + +type stdDispatchVtbl struct { + pQueryInterface uintptr + pAddRef uintptr + pRelease uintptr + pGetTypeInfoCount uintptr + pGetTypeInfo uintptr + pGetIDsOfNames uintptr + pInvoke uintptr +} + +func dispQueryInterface(this *ole.IUnknown, iid *ole.GUID, punk **ole.IUnknown) uint32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + *punk = nil + if ole.IsEqualGUID(iid, ole.IID_IUnknown) || + ole.IsEqualGUID(iid, ole.IID_IDispatch) { + dispAddRef(this) + *punk = this + return ole.S_OK + } + if ole.IsEqualGUID(iid, pthis.iid) { + dispAddRef(this) + *punk = this + return ole.S_OK + } + return ole.E_NOINTERFACE +} + +func dispAddRef(this *ole.IUnknown) int32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + pthis.ref++ + return pthis.ref +} + +func dispRelease(this *ole.IUnknown) int32 { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + pthis.ref-- + return pthis.ref +} + +func dispGetIDsOfNames(this *ole.IUnknown, iid *ole.GUID, wnames []*uint16, namelen int, lcid int, pdisp []int32) uintptr { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + names := make([]string, len(wnames)) + for i := 0; i < len(names); i++ { + names[i] = ole.LpOleStrToString(wnames[i]) + } + for n := 0; n < namelen; n++ { + if id, ok := pthis.funcMap[names[n]]; ok { + pdisp[n] = id + } + } + return ole.S_OK +} + +func dispGetTypeInfoCount(pcount *int) uintptr { + if pcount != nil { + *pcount = 0 + } + return ole.S_OK +} + +func dispGetTypeInfo(ptypeif *uintptr) uintptr { + return ole.E_NOTIMPL +} + +func dispInvoke(this *ole.IDispatch, dispid int32, riid *ole.GUID, lcid int, flags int16, dispparams *ole.DISPPARAMS, result *ole.VARIANT, pexcepinfo *ole.EXCEPINFO, nerr *uint) uintptr { + pthis := (*stdDispatch)(unsafe.Pointer(this)) + found := "" + for name, id := range pthis.funcMap { + if id == dispid { + found = name + } + } + if found != "" { + rv := reflect.ValueOf(pthis.iface).Elem() + rm := rv.MethodByName(found) + rr := rm.Call([]reflect.Value{}) + println(len(rr)) + return ole.S_OK + } + return ole.E_NOTIMPL +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go new file mode 100644 index 0000000000000..8818fb8275ad2 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go @@ -0,0 +1,10 @@ +// +build !windows + +package oleutil + +import ole "github.com/go-ole/go-ole" + +// ConnectObject creates a connection point between two services for communication. 
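+// On non-Windows builds this is a stub that always returns E_NOTIMPL.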
+func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (uint32, error) { + return 0, ole.NewError(ole.E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go new file mode 100644 index 0000000000000..ab9c0d8dcbd4f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go @@ -0,0 +1,58 @@ +// +build windows + +package oleutil + +import ( + "reflect" + "syscall" + "unsafe" + + ole "github.com/go-ole/go-ole" +) + +// ConnectObject creates a connection point between two services for communication. +func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (cookie uint32, err error) { + unknown, err := disp.QueryInterface(ole.IID_IConnectionPointContainer) + if err != nil { + return + } + + container := (*ole.IConnectionPointContainer)(unsafe.Pointer(unknown)) + var point *ole.IConnectionPoint + err = container.FindConnectionPoint(iid, &point) + if err != nil { + return + } + if edisp, ok := idisp.(*ole.IUnknown); ok { + cookie, err = point.Advise(edisp) + container.Release() + if err != nil { + return + } + } + rv := reflect.ValueOf(disp).Elem() + if rv.Type().Kind() == reflect.Struct { + dest := &stdDispatch{} + dest.lpVtbl = &stdDispatchVtbl{} + dest.lpVtbl.pQueryInterface = syscall.NewCallback(dispQueryInterface) + dest.lpVtbl.pAddRef = syscall.NewCallback(dispAddRef) + dest.lpVtbl.pRelease = syscall.NewCallback(dispRelease) + dest.lpVtbl.pGetTypeInfoCount = syscall.NewCallback(dispGetTypeInfoCount) + dest.lpVtbl.pGetTypeInfo = syscall.NewCallback(dispGetTypeInfo) + dest.lpVtbl.pGetIDsOfNames = syscall.NewCallback(dispGetIDsOfNames) + dest.lpVtbl.pInvoke = syscall.NewCallback(dispInvoke) + dest.iface = disp + dest.iid = iid + cookie, err = point.Advise((*ole.IUnknown)(unsafe.Pointer(dest))) + container.Release() + if err != nil { + point.Release() + return + } + return + } + + container.Release() + + return 0, ole.NewError(ole.E_INVALIDARG) +} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go new file mode 100644 index 0000000000000..58347628f24c6 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go @@ -0,0 +1,6 @@ +// This file is here so go get succeeds as without it errors with: +// no buildable Go source files in ... +// +// +build !windows + +package oleutil diff --git a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go new file mode 100644 index 0000000000000..f7803c1e30f24 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go @@ -0,0 +1,127 @@ +package oleutil + +import ole "github.com/go-ole/go-ole" + +// ClassIDFrom retrieves class ID whether given is program ID or application string. +func ClassIDFrom(programID string) (classID *ole.GUID, err error) { + return ole.ClassIDFrom(programID) +} + +// CreateObject creates object from programID based on interface type. +// +// Only supports IUnknown. +// +// Program ID can be either program ID or application string. +func CreateObject(programID string) (unknown *ole.IUnknown, err error) { + classID, err := ole.ClassIDFrom(programID) + if err != nil { + return + } + + unknown, err = ole.CreateInstance(classID, ole.IID_IUnknown) + if err != nil { + return + } + + return +} + +// GetActiveObject retrieves active object for program ID and interface ID based +// on interface type. +// +// Only supports IUnknown. 
+// +// Program ID can be either program ID or application string. +func GetActiveObject(programID string) (unknown *ole.IUnknown, err error) { + classID, err := ole.ClassIDFrom(programID) + if err != nil { + return + } + + unknown, err = ole.GetActiveObject(classID, ole.IID_IUnknown) + if err != nil { + return + } + + return +} + +// CallMethod calls method on IDispatch with parameters. +func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_METHOD, params) +} + +// MustCallMethod calls method on IDispatch with parameters or panics. +func MustCallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := CallMethod(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// GetProperty retrieves property from IDispatch. +func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYGET, params) +} + +// MustGetProperty retrieves property from IDispatch or panics. +func MustGetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := GetProperty(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// PutProperty mutates property. +func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUT, params) +} + +// MustPutProperty mutates property or panics. +func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := PutProperty(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +// PutPropertyRef mutates property reference. +func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params) +} + +// MustPutPropertyRef mutates property reference or panics. +func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := PutPropertyRef(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + +func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error { + newEnum, err := disp.GetProperty("_NewEnum") + if err != nil { + return err + } + defer newEnum.Clear() + + enum, err := newEnum.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + defer enum.Release() + + for item, length, err := enum.Next(1); length > 0; item, length, err = enum.Next(1) { + if err != nil { + return err + } + if ferr := f(&item); ferr != nil { + return ferr + } + } + return nil +} diff --git a/vendor/github.com/go-ole/go-ole/safearray.go b/vendor/github.com/go-ole/go-ole/safearray.go new file mode 100644 index 0000000000000..a5201b56c3d90 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray.go @@ -0,0 +1,27 @@ +// Package is meant to retrieve and process safe array data returned from COM. + +package ole + +// SafeArrayBound defines the SafeArray boundaries. +type SafeArrayBound struct { + Elements uint32 + LowerBound int32 +} + +// SafeArray is how COM handles arrays. 
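+// The layout mirrors the COM SAFEARRAY descriptor: dimension count, feature
+// flags, element size, lock count, the data pointer and per-dimension bounds.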
+type SafeArray struct { + Dimensions uint16 + FeaturesFlag uint16 + ElementsSize uint32 + LocksAmount uint32 + Data uint32 + Bounds [16]byte +} + +// SAFEARRAY is obsolete, exists for backwards compatibility. +// Use SafeArray +type SAFEARRAY SafeArray + +// SAFEARRAYBOUND is obsolete, exists for backwards compatibility. +// Use SafeArrayBound +type SAFEARRAYBOUND SafeArrayBound diff --git a/vendor/github.com/go-ole/go-ole/safearray_func.go b/vendor/github.com/go-ole/go-ole/safearray_func.go new file mode 100644 index 0000000000000..0dee670ceb6d5 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray_func.go @@ -0,0 +1,211 @@ +// +build !windows + +package ole + +import ( + "unsafe" +) + +// safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. +func safeArrayAccessData(safearray *SafeArray) (uintptr, error) { + return uintptr(0), NewError(E_NOTIMPL) +} + +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. +func safeArrayUnaccessData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. +func safeArrayAllocData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayAllocDescriptor allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptor in Windows API. +func safeArrayAllocDescriptor(dimensions uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. +func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. +func safeArrayCopy(original *SafeArray) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. +func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. +func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. +func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. +func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. +func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (*SafeArray, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayDestroy destroys SafeArray object. +// +// AKA: SafeArrayDestroy in Windows API. +func safeArrayDestroy(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. +func safeArrayDestroyData(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayDestroyDescriptor destroys SafeArray object. 
+// +// AKA: SafeArrayDestroyDescriptor in Windows API. +func safeArrayDestroyDescriptor(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetDim is the amount of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetDim in Windows API. +func safeArrayGetDim(safearray *SafeArray) (*uint32, error) { + u := uint32(0) + return &u, NewError(E_NOTIMPL) +} + +// safeArrayGetElementSize is the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. +func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) { + u := uint32(0) + return &u, NewError(E_NOTIMPL) +} + +// safeArrayGetElement retrieves element at given index. +func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetElement retrieves element at given index and converts to string. +func safeArrayGetElementString(safearray *SafeArray, index int32) (string, error) { + return "", NewError(E_NOTIMPL) +} + +// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. +func safeArrayGetIID(safearray *SafeArray) (*GUID, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArrayGetLBound returns lower bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetLBound in Windows API. +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) +} + +// safeArrayGetUBound returns upper bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetUBound in Windows API. +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) +} + +// safeArrayGetVartype returns data type of SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. +func safeArrayGetVartype(safearray *SafeArray) (uint16, error) { + return uint16(0), NewError(E_NOTIMPL) +} + +// safeArrayLock locks SafeArray for reading to modify SafeArray. +// +// This must be called during some calls to ensure that another process does not +// read or write to the SafeArray during editing. +// +// AKA: SafeArrayLock in Windows API. +func safeArrayLock(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. +func safeArrayUnlock(safearray *SafeArray) error { + return NewError(E_NOTIMPL) +} + +// safeArrayPutElement stores the data element at the specified location in the +// array. +// +// AKA: SafeArrayPutElement in Windows API. +func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) error { + return NewError(E_NOTIMPL) +} + +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArrayGetRecordInfo(safearray *SafeArray) (interface{}, error) { + return nil, NewError(E_NOTIMPL) +} + +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. 
+func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) error { + return NewError(E_NOTIMPL) +} diff --git a/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/go-ole/go-ole/safearray_windows.go new file mode 100644 index 0000000000000..0c1b3a10ff9fa --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearray_windows.go @@ -0,0 +1,337 @@ +// +build windows + +package ole + +import ( + "unsafe" +) + +var ( + procSafeArrayAccessData = modoleaut32.NewProc("SafeArrayAccessData") + procSafeArrayAllocData = modoleaut32.NewProc("SafeArrayAllocData") + procSafeArrayAllocDescriptor = modoleaut32.NewProc("SafeArrayAllocDescriptor") + procSafeArrayAllocDescriptorEx = modoleaut32.NewProc("SafeArrayAllocDescriptorEx") + procSafeArrayCopy = modoleaut32.NewProc("SafeArrayCopy") + procSafeArrayCopyData = modoleaut32.NewProc("SafeArrayCopyData") + procSafeArrayCreate = modoleaut32.NewProc("SafeArrayCreate") + procSafeArrayCreateEx = modoleaut32.NewProc("SafeArrayCreateEx") + procSafeArrayCreateVector = modoleaut32.NewProc("SafeArrayCreateVector") + procSafeArrayCreateVectorEx = modoleaut32.NewProc("SafeArrayCreateVectorEx") + procSafeArrayDestroy = modoleaut32.NewProc("SafeArrayDestroy") + procSafeArrayDestroyData = modoleaut32.NewProc("SafeArrayDestroyData") + procSafeArrayDestroyDescriptor = modoleaut32.NewProc("SafeArrayDestroyDescriptor") + procSafeArrayGetDim = modoleaut32.NewProc("SafeArrayGetDim") + procSafeArrayGetElement = modoleaut32.NewProc("SafeArrayGetElement") + procSafeArrayGetElemsize = modoleaut32.NewProc("SafeArrayGetElemsize") + procSafeArrayGetIID = modoleaut32.NewProc("SafeArrayGetIID") + procSafeArrayGetLBound = modoleaut32.NewProc("SafeArrayGetLBound") + procSafeArrayGetUBound = modoleaut32.NewProc("SafeArrayGetUBound") + procSafeArrayGetVartype = modoleaut32.NewProc("SafeArrayGetVartype") + procSafeArrayLock = modoleaut32.NewProc("SafeArrayLock") + procSafeArrayPtrOfIndex = modoleaut32.NewProc("SafeArrayPtrOfIndex") + procSafeArrayUnaccessData = modoleaut32.NewProc("SafeArrayUnaccessData") + procSafeArrayUnlock = modoleaut32.NewProc("SafeArrayUnlock") + procSafeArrayPutElement = modoleaut32.NewProc("SafeArrayPutElement") + //procSafeArrayRedim = modoleaut32.NewProc("SafeArrayRedim") // TODO + //procSafeArraySetIID = modoleaut32.NewProc("SafeArraySetIID") // TODO + procSafeArrayGetRecordInfo = modoleaut32.NewProc("SafeArrayGetRecordInfo") + procSafeArraySetRecordInfo = modoleaut32.NewProc("SafeArraySetRecordInfo") +) + +// safeArrayAccessData returns raw array pointer. +// +// AKA: SafeArrayAccessData in Windows API. +// Todo: Test +func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) { + err = convertHresultToError( + procSafeArrayAccessData.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&element)))) + return +} + +// safeArrayUnaccessData releases raw array. +// +// AKA: SafeArrayUnaccessData in Windows API. +func safeArrayUnaccessData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayUnaccessData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayAllocData allocates SafeArray. +// +// AKA: SafeArrayAllocData in Windows API. +func safeArrayAllocData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayAllocData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayAllocDescriptor allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptor in Windows API. 
+func safeArrayAllocDescriptor(dimensions uint32) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayAllocDescriptor.Call(uintptr(dimensions), uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayAllocDescriptorEx allocates SafeArray. +// +// AKA: SafeArrayAllocDescriptorEx in Windows API. +func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayAllocDescriptorEx.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayCopy returns copy of SafeArray. +// +// AKA: SafeArrayCopy in Windows API. +func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) { + err = convertHresultToError( + procSafeArrayCopy.Call( + uintptr(unsafe.Pointer(original)), + uintptr(unsafe.Pointer(&safearray)))) + return +} + +// safeArrayCopyData duplicates SafeArray into another SafeArray object. +// +// AKA: SafeArrayCopyData in Windows API. +func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) { + err = convertHresultToError( + procSafeArrayCopyData.Call( + uintptr(unsafe.Pointer(original)), + uintptr(unsafe.Pointer(duplicate)))) + return +} + +// safeArrayCreate creates SafeArray. +// +// AKA: SafeArrayCreate in Windows API. +func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreate.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(bounds))) + safearray = (*SafeArray)(unsafe.Pointer(&sa)) + return +} + +// safeArrayCreateEx creates SafeArray. +// +// AKA: SafeArrayCreateEx in Windows API. +func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateEx.Call( + uintptr(variantType), + uintptr(dimensions), + uintptr(unsafe.Pointer(bounds)), + extra) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayCreateVector creates SafeArray. +// +// AKA: SafeArrayCreateVector in Windows API. +func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateVector.Call( + uintptr(variantType), + uintptr(lowerBound), + uintptr(length)) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayCreateVectorEx creates SafeArray. +// +// AKA: SafeArrayCreateVectorEx in Windows API. +func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (safearray *SafeArray, err error) { + sa, _, err := procSafeArrayCreateVectorEx.Call( + uintptr(variantType), + uintptr(lowerBound), + uintptr(length), + extra) + safearray = (*SafeArray)(unsafe.Pointer(sa)) + return +} + +// safeArrayDestroy destroys SafeArray object. +// +// AKA: SafeArrayDestroy in Windows API. +func safeArrayDestroy(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroy.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayDestroyData destroys SafeArray object. +// +// AKA: SafeArrayDestroyData in Windows API. +func safeArrayDestroyData(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroyData.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayDestroyDescriptor destroys SafeArray object. +// +// AKA: SafeArrayDestroyDescriptor in Windows API. 
+func safeArrayDestroyDescriptor(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayDestroyDescriptor.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayGetDim is the amount of dimensions in the SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetDim in Windows API. +func safeArrayGetDim(safearray *SafeArray) (dimensions *uint32, err error) { + l, _, err := procSafeArrayGetDim.Call(uintptr(unsafe.Pointer(safearray))) + dimensions = (*uint32)(unsafe.Pointer(l)) + return +} + +// safeArrayGetElementSize is the element size in bytes. +// +// AKA: SafeArrayGetElemsize in Windows API. +func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) { + l, _, err := procSafeArrayGetElemsize.Call(uintptr(unsafe.Pointer(safearray))) + length = (*uint32)(unsafe.Pointer(l)) + return +} + +// safeArrayGetElement retrieves element at given index. +func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { + return convertHresultToError( + procSafeArrayGetElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(pv))) +} + +// safeArrayGetElementString retrieves element at given index and converts to string. +func safeArrayGetElementString(safearray *SafeArray, index int32) (str string, err error) { + var element *int16 + err = convertHresultToError( + procSafeArrayGetElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(unsafe.Pointer(&element)))) + str = BstrToString(*(**uint16)(unsafe.Pointer(&element))) + SysFreeString(element) + return +} + +// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. +// +// AKA: SafeArrayGetIID in Windows API. +func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) { + err = convertHresultToError( + procSafeArrayGetIID.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&guid)))) + return +} + +// safeArrayGetLBound returns lower bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetLBound in Windows API. +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int32, err error) { + err = convertHresultToError( + procSafeArrayGetLBound.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(dimension), + uintptr(unsafe.Pointer(&lowerBound)))) + return +} + +// safeArrayGetUBound returns upper bounds of SafeArray. +// +// SafeArrays may have multiple dimensions. Meaning, it could be +// multidimensional array. +// +// AKA: SafeArrayGetUBound in Windows API. +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int32, err error) { + err = convertHresultToError( + procSafeArrayGetUBound.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(dimension), + uintptr(unsafe.Pointer(&upperBound)))) + return +} + +// safeArrayGetVartype returns data type of SafeArray. +// +// AKA: SafeArrayGetVartype in Windows API. +func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) { + err = convertHresultToError( + procSafeArrayGetVartype.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&varType)))) + return +} + +// safeArrayLock locks SafeArray for reading to modify SafeArray. +// +// This must be called during some calls to ensure that another process does not +// read or write to the SafeArray during editing. 
+// +// AKA: SafeArrayLock in Windows API. +func safeArrayLock(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayUnlock unlocks SafeArray for reading. +// +// AKA: SafeArrayUnlock in Windows API. +func safeArrayUnlock(safearray *SafeArray) (err error) { + err = convertHresultToError(procSafeArrayUnlock.Call(uintptr(unsafe.Pointer(safearray)))) + return +} + +// safeArrayPutElement stores the data element at the specified location in the +// array. +// +// AKA: SafeArrayPutElement in Windows API. +func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (err error) { + err = convertHresultToError( + procSafeArrayPutElement.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&index)), + uintptr(unsafe.Pointer(element)))) + return +} + +// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. +// +// AKA: SafeArrayGetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err error) { + err = convertHresultToError( + procSafeArrayGetRecordInfo.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&recordInfo)))) + return +} + +// safeArraySetRecordInfo mutates IRecordInfo info for custom types. +// +// AKA: SafeArraySetRecordInfo in Windows API. +// +// XXX: Must implement IRecordInfo interface for this to return. +func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) (err error) { + err = convertHresultToError( + procSafeArraySetRecordInfo.Call( + uintptr(unsafe.Pointer(safearray)), + uintptr(unsafe.Pointer(&recordInfo)))) + return +} diff --git a/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/go-ole/go-ole/safearrayconversion.go new file mode 100644 index 0000000000000..da737293d7cfb --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearrayconversion.go @@ -0,0 +1,140 @@ +// Helper for converting SafeArray to array of objects. 
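+// SafeArrayConversion wraps a *SafeArray and exposes ToStringArray,
+// ToByteArray and ToValueArray for decoding its elements, for example
+// (with variant a previously obtained *VARIANT holding an array):
+//
+//	sac := variant.ToArray()     // nil if the VARIANT does not hold an array
+//	values := sac.ToValueArray() // []interface{}, decoded per the element VT
+//	sac.Release()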
+ +package ole + +import ( + "unsafe" +) + +type SafeArrayConversion struct { + Array *SafeArray +} + +func (sac *SafeArrayConversion) ToStringArray() (strings []string) { + totalElements, _ := sac.TotalElements(0) + strings = make([]string, totalElements) + + for i := int32(0); i < totalElements; i++ { + strings[int32(i)], _ = safeArrayGetElementString(sac.Array, i) + } + + return +} + +func (sac *SafeArrayConversion) ToByteArray() (bytes []byte) { + totalElements, _ := sac.TotalElements(0) + bytes = make([]byte, totalElements) + + for i := int32(0); i < totalElements; i++ { + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&bytes[int32(i)])) + } + + return +} + +func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) { + totalElements, _ := sac.TotalElements(0) + values = make([]interface{}, totalElements) + vt, _ := safeArrayGetVartype(sac.Array) + + for i := int32(0); i < totalElements; i++ { + switch VT(vt) { + case VT_BOOL: + var v bool + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I1: + var v int8 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I2: + var v int16 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I4: + var v int32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_I8: + var v int64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI1: + var v uint8 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI2: + var v uint16 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI4: + var v uint32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_UI8: + var v uint64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_R4: + var v float32 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_R8: + var v float64 + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v + case VT_BSTR: + v , _ := safeArrayGetElementString(sac.Array, i) + values[i] = v + case VT_VARIANT: + var v VARIANT + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) + values[i] = v.Value() + v.Clear() + default: + // TODO + } + } + + return +} + +func (sac *SafeArrayConversion) GetType() (varType uint16, err error) { + return safeArrayGetVartype(sac.Array) +} + +func (sac *SafeArrayConversion) GetDimensions() (dimensions *uint32, err error) { + return safeArrayGetDim(sac.Array) +} + +func (sac *SafeArrayConversion) GetSize() (length *uint32, err error) { + return safeArrayGetElementSize(sac.Array) +} + +func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int32, err error) { + if index < 1 { + index = 1 + } + + // Get array bounds + var LowerBounds int32 + var UpperBounds int32 + + LowerBounds, err = safeArrayGetLBound(sac.Array, index) + if err != nil { + return + } + + UpperBounds, err = safeArrayGetUBound(sac.Array, index) + if err != nil { + return + } + + totalElements = UpperBounds - LowerBounds + 1 + return +} + +// Release Safe Array memory +func (sac *SafeArrayConversion) Release() { + safeArrayDestroy(sac.Array) +} diff --git a/vendor/github.com/go-ole/go-ole/safearrayslices.go b/vendor/github.com/go-ole/go-ole/safearrayslices.go new file mode 100644 index 0000000000000..a9fa885f1d814 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/safearrayslices.go @@ -0,0 +1,33 @@ +// +build windows + +package ole + +import ( + "unsafe" +) + 
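+// The helpers below copy Go slices into freshly created SAFEARRAY vectors
+// (VT_UI1 for bytes, VT_BSTR for strings) so they can be handed to COM, e.g.
+//
+//	sa := safeArrayFromStringSlice([]string{"a", "b"})
+//	defer safeArrayDestroy(sa)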
+func safeArrayFromByteSlice(slice []byte) *SafeArray { + array, _ := safeArrayCreateVector(VT_UI1, 0, uint32(len(slice))) + + if array == nil { + panic("Could not convert []byte to SAFEARRAY") + } + + for i, v := range slice { + safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(&v))) + } + return array +} + +func safeArrayFromStringSlice(slice []string) *SafeArray { + array, _ := safeArrayCreateVector(VT_BSTR, 0, uint32(len(slice))) + + if array == nil { + panic("Could not convert []string to SAFEARRAY") + } + // SysAllocStringLen(s) + for i, v := range slice { + safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(SysAllocStringLen(v)))) + } + return array +} diff --git a/vendor/github.com/go-ole/go-ole/utility.go b/vendor/github.com/go-ole/go-ole/utility.go new file mode 100644 index 0000000000000..99ee82dc34515 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/utility.go @@ -0,0 +1,101 @@ +package ole + +import ( + "unicode/utf16" + "unsafe" +) + +// ClassIDFrom retrieves class ID whether given is program ID or application string. +// +// Helper that provides check against both Class ID from Program ID and Class ID from string. It is +// faster, if you know which you are using, to use the individual functions, but this will check +// against available functions for you. +func ClassIDFrom(programID string) (classID *GUID, err error) { + classID, err = CLSIDFromProgID(programID) + if err != nil { + classID, err = CLSIDFromString(programID) + if err != nil { + return + } + } + return +} + +// BytePtrToString converts byte pointer to a Go string. +func BytePtrToString(p *byte) string { + a := (*[10000]uint8)(unsafe.Pointer(p)) + i := 0 + for a[i] != 0 { + i++ + } + return string(a[:i]) +} + +// UTF16PtrToString is alias for LpOleStrToString. +// +// Kept for compatibility reasons. +func UTF16PtrToString(p *uint16) string { + return LpOleStrToString(p) +} + +// LpOleStrToString converts COM Unicode to Go string. +func LpOleStrToString(p *uint16) string { + if p == nil { + return "" + } + + length := lpOleStrLen(p) + a := make([]uint16, length) + + ptr := unsafe.Pointer(p) + + for i := 0; i < int(length); i++ { + a[i] = *(*uint16)(ptr) + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + + return string(utf16.Decode(a)) +} + +// BstrToString converts COM binary string to Go string. +func BstrToString(p *uint16) string { + if p == nil { + return "" + } + length := SysStringLen((*int16)(unsafe.Pointer(p))) + a := make([]uint16, length) + + ptr := unsafe.Pointer(p) + + for i := 0; i < int(length); i++ { + a[i] = *(*uint16)(ptr) + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + return string(utf16.Decode(a)) +} + +// lpOleStrLen returns the length of Unicode string. +func lpOleStrLen(p *uint16) (length int64) { + if p == nil { + return 0 + } + + ptr := unsafe.Pointer(p) + + for i := 0; ; i++ { + if 0 == *(*uint16)(ptr) { + length = int64(i) + break + } + ptr = unsafe.Pointer(uintptr(ptr) + 2) + } + return +} + +// convertHresultToError converts syscall to error, if call is unsuccessful. 
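+// The signature matches the three return values of a raw Proc.Call /
+// syscall.Syscall invocation so a Windows API call can be wrapped in a single
+// expression; only the first value (the HRESULT) is inspected, e.g.
+//
+//	err := convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray))))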
+func convertHresultToError(hr uintptr, r2 uintptr, ignore error) (err error) { + if hr != 0 { + err = NewError(hr) + } + return +} diff --git a/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/go-ole/go-ole/variables.go new file mode 100644 index 0000000000000..a6add1b0066af --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variables.go @@ -0,0 +1,15 @@ +// +build windows + +package ole + +import ( + "golang.org/x/sys/windows" +) + +var ( + modcombase = windows.NewLazySystemDLL("combase.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modole32 = windows.NewLazySystemDLL("ole32.dll") + modoleaut32 = windows.NewLazySystemDLL("oleaut32.dll") + moduser32 = windows.NewLazySystemDLL("user32.dll") +) diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go new file mode 100644 index 0000000000000..967a23fea9aba --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant.go @@ -0,0 +1,105 @@ +package ole + +import "unsafe" + +// NewVariant returns new variant based on type and value. +func NewVariant(vt VT, val int64) VARIANT { + return VARIANT{VT: vt, Val: val} +} + +// ToIUnknown converts Variant to Unknown object. +func (v *VARIANT) ToIUnknown() *IUnknown { + if v.VT != VT_UNKNOWN { + return nil + } + return (*IUnknown)(unsafe.Pointer(uintptr(v.Val))) +} + +// ToIDispatch converts variant to dispatch object. +func (v *VARIANT) ToIDispatch() *IDispatch { + if v.VT != VT_DISPATCH { + return nil + } + return (*IDispatch)(unsafe.Pointer(uintptr(v.Val))) +} + +// ToArray converts variant to SafeArray helper. +func (v *VARIANT) ToArray() *SafeArrayConversion { + if v.VT != VT_SAFEARRAY { + if v.VT&VT_ARRAY == 0 { + return nil + } + } + var safeArray *SafeArray = (*SafeArray)(unsafe.Pointer(uintptr(v.Val))) + return &SafeArrayConversion{safeArray} +} + +// ToString converts variant to Go string. +func (v *VARIANT) ToString() string { + if v.VT != VT_BSTR { + return "" + } + return BstrToString(*(**uint16)(unsafe.Pointer(&v.Val))) +} + +// Clear the memory of variant object. +func (v *VARIANT) Clear() error { + return VariantClear(v) +} + +// Value returns variant value based on its type. +// +// Currently supported types: 2- and 4-byte integers, strings, bools. +// Note that 64-bit integers, datetimes, and other types are stored as strings +// and will be returned as strings. +// +// Needs to be further converted, because this returns an interface{}. +func (v *VARIANT) Value() interface{} { + switch v.VT { + case VT_I1: + return int8(v.Val) + case VT_UI1: + return uint8(v.Val) + case VT_I2: + return int16(v.Val) + case VT_UI2: + return uint16(v.Val) + case VT_I4: + return int32(v.Val) + case VT_UI4: + return uint32(v.Val) + case VT_I8: + return int64(v.Val) + case VT_UI8: + return uint64(v.Val) + case VT_INT: + return int(v.Val) + case VT_UINT: + return uint(v.Val) + case VT_INT_PTR: + return uintptr(v.Val) // TODO + case VT_UINT_PTR: + return uintptr(v.Val) + case VT_R4: + return *(*float32)(unsafe.Pointer(&v.Val)) + case VT_R8: + return *(*float64)(unsafe.Pointer(&v.Val)) + case VT_BSTR: + return v.ToString() + case VT_DATE: + // VT_DATE type will either return float64 or time.Time. 
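+		// Conversion goes through GetVariantDate; if that fails, the raw
+		// value is returned as a float64 instead.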
+ d := uint64(v.Val) + date, err := GetVariantDate(d) + if err != nil { + return float64(v.Val) + } + return date + case VT_UNKNOWN: + return v.ToIUnknown() + case VT_DISPATCH: + return v.ToIDispatch() + case VT_BOOL: + return v.Val != 0 + } + return nil +} diff --git a/vendor/github.com/go-ole/go-ole/variant_386.go b/vendor/github.com/go-ole/go-ole/variant_386.go new file mode 100644 index 0000000000000..e73736bf39179 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_386.go @@ -0,0 +1,11 @@ +// +build 386 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_amd64.go b/vendor/github.com/go-ole/go-ole/variant_amd64.go new file mode 100644 index 0000000000000..dccdde132333c --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_amd64.go @@ -0,0 +1,12 @@ +// +build amd64 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_arm.go b/vendor/github.com/go-ole/go-ole/variant_arm.go new file mode 100644 index 0000000000000..d4724544437ba --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_arm.go @@ -0,0 +1,11 @@ +// +build arm + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_arm64.go b/vendor/github.com/go-ole/go-ole/variant_arm64.go new file mode 100644 index 0000000000000..78473cec4f6b2 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_arm64.go @@ -0,0 +1,13 @@ +//go:build arm64 +// +build arm64 + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_386.go b/vendor/github.com/go-ole/go-ole/variant_date_386.go new file mode 100644 index 0000000000000..1b970f63f5fbc --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_386.go @@ -0,0 +1,22 @@ +// +build windows,386 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. +func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + v1 := uint32(value) + v2 := uint32(value >> 32) + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_amd64.go b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go new file mode 100644 index 0000000000000..6952f1f0de64e --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go @@ -0,0 +1,20 @@ +// +build windows,amd64 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. 
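+// On amd64 the 64-bit VariantTime is passed to VariantTimeToSystemTime as a
+// single argument, unlike the 32-bit ports, which split it into two halves.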
+func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm.go b/vendor/github.com/go-ole/go-ole/variant_date_arm.go new file mode 100644 index 0000000000000..09ec7b5cfdfca --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_arm.go @@ -0,0 +1,22 @@ +// +build windows,arm + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. +func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + v1 := uint32(value) + v2 := uint32(value >> 32) + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm64.go b/vendor/github.com/go-ole/go-ole/variant_date_arm64.go new file mode 100644 index 0000000000000..02b04a0d4af4b --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_date_arm64.go @@ -0,0 +1,23 @@ +//go:build windows && arm64 +// +build windows,arm64 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. 
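+// Although arm64 is a 64-bit port, the value is split into two 32-bit halves
+// here, matching the calling pattern of the 386 and arm ports.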
+func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + v1 := uint32(value) + v2 := uint32(value >> 32) + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/go-ole/go-ole/variant_ppc64le.go b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go new file mode 100644 index 0000000000000..326427a7d1444 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go @@ -0,0 +1,12 @@ +// +build ppc64le + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/variant_s390x.go b/vendor/github.com/go-ole/go-ole/variant_s390x.go new file mode 100644 index 0000000000000..9874ca66b4f5f --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/variant_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/go-ole/go-ole/vt_string.go b/vendor/github.com/go-ole/go-ole/vt_string.go new file mode 100644 index 0000000000000..729b4a04dd9da --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/vt_string.go @@ -0,0 +1,58 @@ +// generated by stringer -output vt_string.go -type VT; DO NOT EDIT + +package ole + +import "fmt" + +const ( + _VT_name_0 = "VT_EMPTYVT_NULLVT_I2VT_I4VT_R4VT_R8VT_CYVT_DATEVT_BSTRVT_DISPATCHVT_ERRORVT_BOOLVT_VARIANTVT_UNKNOWNVT_DECIMAL" + _VT_name_1 = "VT_I1VT_UI1VT_UI2VT_UI4VT_I8VT_UI8VT_INTVT_UINTVT_VOIDVT_HRESULTVT_PTRVT_SAFEARRAYVT_CARRAYVT_USERDEFINEDVT_LPSTRVT_LPWSTR" + _VT_name_2 = "VT_RECORDVT_INT_PTRVT_UINT_PTR" + _VT_name_3 = "VT_FILETIMEVT_BLOBVT_STREAMVT_STORAGEVT_STREAMED_OBJECTVT_STORED_OBJECTVT_BLOB_OBJECTVT_CFVT_CLSID" + _VT_name_4 = "VT_BSTR_BLOBVT_VECTOR" + _VT_name_5 = "VT_ARRAY" + _VT_name_6 = "VT_BYREF" + _VT_name_7 = "VT_RESERVED" + _VT_name_8 = "VT_ILLEGAL" +) + +var ( + _VT_index_0 = [...]uint8{0, 8, 15, 20, 25, 30, 35, 40, 47, 54, 65, 73, 80, 90, 100, 110} + _VT_index_1 = [...]uint8{0, 5, 11, 17, 23, 28, 34, 40, 47, 54, 64, 70, 82, 91, 105, 113, 122} + _VT_index_2 = [...]uint8{0, 9, 19, 30} + _VT_index_3 = [...]uint8{0, 11, 18, 27, 37, 55, 71, 85, 90, 98} + _VT_index_4 = [...]uint8{0, 12, 21} + _VT_index_5 = [...]uint8{0, 8} + _VT_index_6 = [...]uint8{0, 8} + _VT_index_7 = [...]uint8{0, 11} + _VT_index_8 = [...]uint8{0, 10} +) + +func (i VT) String() string { + switch { + case 0 <= i && i <= 14: + return _VT_name_0[_VT_index_0[i]:_VT_index_0[i+1]] + case 16 <= i && i <= 31: + i -= 16 + return _VT_name_1[_VT_index_1[i]:_VT_index_1[i+1]] + case 36 <= i && i <= 38: + i -= 36 + return _VT_name_2[_VT_index_2[i]:_VT_index_2[i+1]] + case 64 <= i && i <= 72: + i -= 64 + return _VT_name_3[_VT_index_3[i]:_VT_index_3[i+1]] + case 4095 <= i && i <= 4096: + i -= 4095 + return _VT_name_4[_VT_index_4[i]:_VT_index_4[i+1]] + case i == 8192: + return _VT_name_5 + case i == 16384: + return _VT_name_6 + case i == 32768: + return _VT_name_7 + case i == 65535: + return _VT_name_8 + default: + return fmt.Sprintf("VT(%d)", i) + } +} diff --git a/vendor/github.com/go-ole/go-ole/winrt.go 
b/vendor/github.com/go-ole/go-ole/winrt.go new file mode 100644 index 0000000000000..4e9eca73244ee --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/winrt.go @@ -0,0 +1,99 @@ +// +build windows + +package ole + +import ( + "reflect" + "syscall" + "unicode/utf8" + "unsafe" +) + +var ( + procRoInitialize = modcombase.NewProc("RoInitialize") + procRoActivateInstance = modcombase.NewProc("RoActivateInstance") + procRoGetActivationFactory = modcombase.NewProc("RoGetActivationFactory") + procWindowsCreateString = modcombase.NewProc("WindowsCreateString") + procWindowsDeleteString = modcombase.NewProc("WindowsDeleteString") + procWindowsGetStringRawBuffer = modcombase.NewProc("WindowsGetStringRawBuffer") +) + +func RoInitialize(thread_type uint32) (err error) { + hr, _, _ := procRoInitialize.Call(uintptr(thread_type)) + if hr != 0 { + err = NewError(hr) + } + return +} + +func RoActivateInstance(clsid string) (ins *IInspectable, err error) { + hClsid, err := NewHString(clsid) + if err != nil { + return nil, err + } + defer DeleteHString(hClsid) + + hr, _, _ := procRoActivateInstance.Call( + uintptr(unsafe.Pointer(hClsid)), + uintptr(unsafe.Pointer(&ins))) + if hr != 0 { + err = NewError(hr) + } + return +} + +func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { + hClsid, err := NewHString(clsid) + if err != nil { + return nil, err + } + defer DeleteHString(hClsid) + + hr, _, _ := procRoGetActivationFactory.Call( + uintptr(unsafe.Pointer(hClsid)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&ins))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// HString is handle string for pointers. +type HString uintptr + +// NewHString returns a new HString for Go string. +func NewHString(s string) (hstring HString, err error) { + u16 := syscall.StringToUTF16Ptr(s) + len := uint32(utf8.RuneCountInString(s)) + hr, _, _ := procWindowsCreateString.Call( + uintptr(unsafe.Pointer(u16)), + uintptr(len), + uintptr(unsafe.Pointer(&hstring))) + if hr != 0 { + err = NewError(hr) + } + return +} + +// DeleteHString deletes HString. +func DeleteHString(hstring HString) (err error) { + hr, _, _ := procWindowsDeleteString.Call(uintptr(hstring)) + if hr != 0 { + err = NewError(hr) + } + return +} + +// String returns Go string value of HString. +func (h HString) String() string { + var u16buf uintptr + var u16len uint32 + u16buf, _, _ = procWindowsGetStringRawBuffer.Call( + uintptr(h), + uintptr(unsafe.Pointer(&u16len))) + + u16hdr := reflect.SliceHeader{Data: u16buf, Len: int(u16len), Cap: int(u16len)} + u16 := *(*[]uint16)(unsafe.Pointer(&u16hdr)) + return syscall.UTF16ToString(u16) +} diff --git a/vendor/github.com/go-ole/go-ole/winrt_doc.go b/vendor/github.com/go-ole/go-ole/winrt_doc.go new file mode 100644 index 0000000000000..52e6d74c9ab3a --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/winrt_doc.go @@ -0,0 +1,36 @@ +// +build !windows + +package ole + +// RoInitialize +func RoInitialize(thread_type uint32) (err error) { + return NewError(E_NOTIMPL) +} + +// RoActivateInstance +func RoActivateInstance(clsid string) (ins *IInspectable, err error) { + return nil, NewError(E_NOTIMPL) +} + +// RoGetActivationFactory +func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { + return nil, NewError(E_NOTIMPL) +} + +// HString is handle string for pointers. +type HString uintptr + +// NewHString returns a new HString for Go string. 
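+// On non-Windows builds this is a stub that always fails with E_NOTIMPL.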
+func NewHString(s string) (hstring HString, err error) { + return HString(uintptr(0)), NewError(E_NOTIMPL) +} + +// DeleteHString deletes HString. +func DeleteHString(hstring HString) (err error) { + return NewError(E_NOTIMPL) +} + +// String returns Go string value of HString. +func (h HString) String() string { + return "" +} diff --git a/vendor/github.com/lufia/plan9stats/.gitignore b/vendor/github.com/lufia/plan9stats/.gitignore new file mode 100644 index 0000000000000..f1c181ec9c5c9 --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/.gitignore @@ -0,0 +1,12 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/lufia/plan9stats/LICENSE b/vendor/github.com/lufia/plan9stats/LICENSE new file mode 100644 index 0000000000000..a6d47e8071825 --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, KADOTA, Kyohei +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/lufia/plan9stats/README.md b/vendor/github.com/lufia/plan9stats/README.md new file mode 100644 index 0000000000000..a21700c0cf335 --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/README.md @@ -0,0 +1,2 @@ +# plan9stats +A module for retrieving statistics of Plan 9 diff --git a/vendor/github.com/lufia/plan9stats/cpu.go b/vendor/github.com/lufia/plan9stats/cpu.go new file mode 100644 index 0000000000000..a101b911906e4 --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/cpu.go @@ -0,0 +1,288 @@ +package stats + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "time" +) + +// CPUType represents /dev/cputype. +type CPUType struct { + Name string + Clock int // clock rate in MHz +} + +func ReadCPUType(ctx context.Context, opts ...Option) (*CPUType, error) { + cfg := newConfig(opts...) 
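+	// Parse /dev/cputype (relative to the configured root directory) into c.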
+ var c CPUType + if err := readCPUType(cfg.rootdir, &c); err != nil { + return nil, err + } + return &c, nil +} + +type SysStats struct { + ID int + NumCtxSwitch int64 + NumInterrupt int64 + NumSyscall int64 + NumFault int64 + NumTLBFault int64 + NumTLBPurge int64 + LoadAvg int64 // in units of milli-CPUs and is decayed over time + Idle int // percentage + Interrupt int // percentage +} + +// ReadSysStats reads system statistics from /dev/sysstat. +func ReadSysStats(ctx context.Context, opts ...Option) ([]*SysStats, error) { + cfg := newConfig(opts...) + file := filepath.Join(cfg.rootdir, "/dev/sysstat") + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + var stats []*SysStats + for scanner.Scan() { + a := strings.Fields(scanner.Text()) + if len(a) != 10 { + continue + } + var ( + p intParser + stat SysStats + ) + stat.ID = p.ParseInt(a[0], 10) + stat.NumCtxSwitch = p.ParseInt64(a[1], 10) + stat.NumInterrupt = p.ParseInt64(a[2], 10) + stat.NumSyscall = p.ParseInt64(a[3], 10) + stat.NumFault = p.ParseInt64(a[4], 10) + stat.NumTLBFault = p.ParseInt64(a[5], 10) + stat.NumTLBPurge = p.ParseInt64(a[6], 10) + stat.LoadAvg = p.ParseInt64(a[7], 10) + stat.Idle = p.ParseInt(a[8], 10) + stat.Interrupt = p.ParseInt(a[9], 10) + if err := p.Err(); err != nil { + return nil, err + } + stats = append(stats, &stat) + } + if err := scanner.Err(); err != nil { + return nil, err + } + return stats, nil +} + +func readCPUType(rootdir string, c *CPUType) error { + file := filepath.Join(rootdir, "/dev/cputype") + b, err := ioutil.ReadFile(file) + if err != nil { + return err + } + b = bytes.TrimSpace(b) + i := bytes.LastIndexByte(b, ' ') + if i < 0 { + return fmt.Errorf("%s: invalid format", file) + } + clock, err := strconv.Atoi(string(b[i+1:])) + if err != nil { + return err + } + c.Name = string(b[:i]) + c.Clock = clock + return nil +} + +// Time represents /dev/time. +type Time struct { + Unix time.Duration + UnixNano time.Duration + Ticks int64 // clock ticks + Freq int64 //cloc frequency +} + +// Uptime returns uptime. +func (t *Time) Uptime() time.Duration { + v := float64(t.Ticks) / float64(t.Freq) + return time.Duration(v*1000_000_000) * time.Nanosecond +} + +func ReadTime(ctx context.Context, opts ...Option) (*Time, error) { + cfg := newConfig(opts...) + file := filepath.Join(cfg.rootdir, "/dev/time") + var t Time + if err := readTime(file, &t); err != nil { + return nil, err + } + return &t, nil +} + +// ProcStatus represents a /proc/n/status. +type ProcStatus struct { + Name string + User string + State string + Times CPUTime + MemUsed int64 // in units of 1024 bytes + BasePriority uint32 // 0(low) to 19(high) + Priority uint32 // 0(low) to 19(high) +} + +// CPUTime represents /dev/cputime or a part of /proc/n/status. +type CPUTime struct { + User time.Duration // the time in user mode (millisecconds) + Sys time.Duration + Real time.Duration + ChildUser time.Duration // exited children and descendants time in user mode + ChildSys time.Duration + ChildReal time.Duration +} + +// CPUStats emulates Linux's /proc/stat. +type CPUStats struct { + User time.Duration + Sys time.Duration + Idle time.Duration +} + +func ReadCPUStats(ctx context.Context, opts ...Option) (*CPUStats, error) { + cfg := newConfig(opts...) + a, err := ReadSysStats(ctx, opts...) 
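+	// ReadSysStats returns one entry per core; the count is used further down
+	// to scale uptime into total idle time.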
+ if err != nil { + return nil, err + } + + dir := filepath.Join(cfg.rootdir, "/proc") + d, err := os.Open(dir) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(0) + if err != nil { + return nil, err + } + var up uint32parser + pids := make([]uint32, len(names)) + for i, s := range names { + pids[i] = up.Parse(s) + } + if up.err != nil { + return nil, err + } + sort.Slice(pids, func(i, j int) bool { + return pids[i] < pids[j] + }) + + var stat CPUStats + for _, pid := range pids { + s := strconv.FormatUint(uint64(pid), 10) + file := filepath.Join(dir, s, "status") + var p ProcStatus + if err := readProcStatus(file, &p); err != nil { + return nil, err + } + stat.User += p.Times.User + stat.Sys += p.Times.Sys + } + + var t Time + file := filepath.Join(cfg.rootdir, "/dev/time") + if err := readTime(file, &t); err != nil { + return nil, err + } + // In multi-processor host, Idle should multiple by number of cores. + u := t.Uptime() * time.Duration(len(a)) + stat.Idle = u - stat.User - stat.Sys + return &stat, nil +} + +func readProcStatus(file string, p *ProcStatus) error { + b, err := ioutil.ReadFile(file) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + fields := strings.Fields(string(b)) + if len(fields) != 12 { + return errors.New("invalid format") + } + p.Name = string(fields[0]) + p.User = string(fields[1]) + p.State = string(fields[2]) + var up uint32parser + p.Times.User = time.Duration(up.Parse(fields[3])) * time.Millisecond + p.Times.Sys = time.Duration(up.Parse(fields[4])) * time.Millisecond + p.Times.Real = time.Duration(up.Parse(fields[5])) * time.Millisecond + p.Times.ChildUser = time.Duration(up.Parse(fields[6])) * time.Millisecond + p.Times.ChildSys = time.Duration(up.Parse(fields[7])) * time.Millisecond + p.Times.ChildReal = time.Duration(up.Parse(fields[8])) * time.Millisecond + p.MemUsed, err = strconv.ParseInt(fields[9], 10, 64) + if err != nil { + return err + } + p.BasePriority = up.Parse(fields[10]) + p.Priority = up.Parse(fields[11]) + return up.err +} + +func readTime(file string, t *Time) error { + b, err := ioutil.ReadFile(file) + if err != nil { + return err + } + fields := strings.Fields(string(b)) + if len(fields) != 4 { + return errors.New("invalid format") + } + n, err := strconv.ParseInt(fields[0], 10, 32) + if err != nil { + return err + } + t.Unix = time.Duration(n) * time.Second + v, err := strconv.ParseInt(fields[1], 10, 64) + if err != nil { + return err + } + t.UnixNano = time.Duration(v) * time.Nanosecond + t.Ticks, err = strconv.ParseInt(fields[2], 10, 64) + if err != nil { + return err + } + t.Freq, err = strconv.ParseInt(fields[3], 10, 64) + if err != nil { + return err + } + return nil +} + +type uint32parser struct { + err error +} + +func (p *uint32parser) Parse(s string) uint32 { + if p.err != nil { + return 0 + } + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + p.err = err + return 0 + } + return uint32(n) +} diff --git a/vendor/github.com/lufia/plan9stats/doc.go b/vendor/github.com/lufia/plan9stats/doc.go new file mode 100644 index 0000000000000..10e398e7a8797 --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/doc.go @@ -0,0 +1,2 @@ +// Package stats provides statistic utilities for Plan 9. 
+package stats diff --git a/vendor/github.com/lufia/plan9stats/host.go b/vendor/github.com/lufia/plan9stats/host.go new file mode 100644 index 0000000000000..957e903489515 --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/host.go @@ -0,0 +1,303 @@ +package stats + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "strconv" + "strings" +) + +var ( + delim = []byte{' '} +) + +// Host represents host status. +type Host struct { + Sysname string + Storages []*Storage + Interfaces []*Interface +} + +// MemStats represents the memory statistics. +type MemStats struct { + Total int64 // total memory in byte + PageSize int64 // a page size in byte + KernelPages int64 + UserPages Gauge + SwapPages Gauge + + Malloced Gauge // kernel malloced data in byte + Graphics Gauge // kernel graphics data in byte +} + +// Gauge is used/available gauge. +type Gauge struct { + Used int64 + Avail int64 +} + +func (g Gauge) Free() int64 { + return g.Avail - g.Used +} + +// ReadMemStats reads memory statistics from /dev/swap. +func ReadMemStats(ctx context.Context, opts ...Option) (*MemStats, error) { + cfg := newConfig(opts...) + swap := filepath.Join(cfg.rootdir, "/dev/swap") + f, err := os.Open(swap) + if err != nil { + return nil, err + } + defer f.Close() + + var stat MemStats + m := map[string]interface{}{ + "memory": &stat.Total, + "pagesize": &stat.PageSize, + "kernel": &stat.KernelPages, + "user": &stat.UserPages, + "swap": &stat.SwapPages, + "kernel malloc": &stat.Malloced, + "kernel draw": &stat.Graphics, + } + scanner := bufio.NewScanner(f) + for scanner.Scan() { + fields := bytes.SplitN(scanner.Bytes(), delim, 2) + if len(fields) < 2 { + continue + } + switch key := string(fields[1]); key { + case "memory", "pagesize", "kernel": + v := m[key].(*int64) + n, err := strconv.ParseInt(string(fields[0]), 10, 64) + if err != nil { + return nil, err + } + *v = n + case "user", "swap", "kernel malloc", "kernel draw": + v := m[key].(*Gauge) + if err := parseGauge(string(fields[0]), v); err != nil { + return nil, err + } + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return &stat, nil +} + +func parseGauge(s string, r *Gauge) error { + a := strings.SplitN(s, "/", 2) + if len(a) != 2 { + return fmt.Errorf("can't parse ratio: %s", s) + } + var p intParser + u := p.ParseInt64(a[0], 10) + n := p.ParseInt64(a[1], 10) + if err := p.Err(); err != nil { + return err + } + r.Used = u + r.Avail = n + return nil +} + +type Storage struct { + Name string + Model string + Capacity int64 +} + +type Interface struct { + Name string + Addr string +} + +const ( + numEther = 8 // see ether(3) + numIpifc = 16 // see ip(3) +) + +// ReadInterfaces reads network interfaces from etherN. +func ReadInterfaces(ctx context.Context, opts ...Option) ([]*Interface, error) { + cfg := newConfig(opts...) 
+ var a []*Interface + for i := 0; i < numEther; i++ { + p, err := readInterface(cfg.rootdir, i) + if os.IsNotExist(err) { + continue + } + if err != nil { + return nil, err + } + a = append(a, p) + } + return a, nil +} + +func readInterface(netroot string, i int) (*Interface, error) { + ether := fmt.Sprintf("ether%d", i) + dir := filepath.Join(netroot, ether) + info, err := os.Stat(dir) + if err != nil { + return nil, err + } + if !info.IsDir() { + return nil, fmt.Errorf("%s: is not directory", dir) + } + + addr, err := ioutil.ReadFile(filepath.Join(dir, "addr")) + if err != nil { + return nil, err + } + return &Interface{ + Name: ether, + Addr: string(addr), + }, nil +} + +var ( + netdirs = []string{"/net", "/net.alt"} +) + +// ReadHost reads host status. +func ReadHost(ctx context.Context, opts ...Option) (*Host, error) { + cfg := newConfig(opts...) + var h Host + name, err := readSysname(cfg.rootdir) + if err != nil { + return nil, err + } + h.Sysname = name + + a, err := readStorages(cfg.rootdir) + if err != nil { + return nil, err + } + h.Storages = a + + for _, s := range netdirs { + netroot := filepath.Join(cfg.rootdir, s) + ifaces, err := ReadInterfaces(ctx, WithRootDir(netroot)) + if err != nil { + return nil, err + } + h.Interfaces = append(h.Interfaces, ifaces...) + } + return &h, nil +} + +func readSysname(rootdir string) (string, error) { + file := filepath.Join(rootdir, "/dev/sysname") + b, err := ioutil.ReadFile(file) + if err != nil { + return "", err + } + return string(bytes.TrimSpace(b)), nil +} + +func readStorages(rootdir string) ([]*Storage, error) { + sdctl := filepath.Join(rootdir, "/dev/sdctl") + f, err := os.Open(sdctl) + if err != nil { + return nil, err + } + defer f.Close() + + var a []*Storage + scanner := bufio.NewScanner(f) + for scanner.Scan() { + fields := bytes.Split(scanner.Bytes(), delim) + if len(fields) == 0 { + continue + } + exp := string(fields[0]) + "*" + if !strings.HasPrefix(exp, "sd") { + continue + } + dir := filepath.Join(rootdir, "/dev", exp) + m, err := filepath.Glob(dir) + if err != nil { + return nil, err + } + for _, dir := range m { + s, err := readStorage(dir) + if err != nil { + return nil, err + } + a = append(a, s) + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return a, nil +} + +func readStorage(dir string) (*Storage, error) { + ctl := filepath.Join(dir, "ctl") + f, err := os.Open(ctl) + if err != nil { + return nil, err + } + defer f.Close() + + var s Storage + s.Name = filepath.Base(dir) + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Bytes() + switch { + case bytes.HasPrefix(line, []byte("inquiry")): + s.Model = string(bytes.TrimSpace(line[7:])) + case bytes.HasPrefix(line, []byte("geometry")): + fields := bytes.Split(line, delim) + if len(fields) < 3 { + continue + } + var p intParser + sec := p.ParseInt64(string(fields[1]), 10) + size := p.ParseInt64(string(fields[2]), 10) + if err := p.Err(); err != nil { + return nil, err + } + s.Capacity = sec * size + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return &s, nil +} + +type IPStats struct { + ID int // number of interface in ipifc dir + Device string // associated physical device + MTU int // max transfer unit + Sendra6 uint8 // on == send router adv + Recvra6 uint8 // on == recv router adv + + Pktin int64 // packets read + Pktout int64 // packets written + Errin int64 // read errors + Errout int64 // write errors +} + +type Iplifc struct { + IP net.IP + Mask net.IPMask + Net net.IP // ip & mask + 
PerfLifetime int64 // preferred lifetime + ValidLifetime int64 // valid lifetime +} + +type Ipv6rp struct { + // TODO(lufia): see ip(2) +} diff --git a/vendor/github.com/lufia/plan9stats/int.go b/vendor/github.com/lufia/plan9stats/int.go new file mode 100644 index 0000000000000..db133c43ed1c9 --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/int.go @@ -0,0 +1,31 @@ +package stats + +import ( + "strconv" +) + +type intParser struct { + err error +} + +func (p *intParser) ParseInt(s string, base int) int { + if p.err != nil { + return 0 + } + var n int64 + n, p.err = strconv.ParseInt(s, base, 0) + return int(n) +} + +func (p *intParser) ParseInt64(s string, base int) int64 { + if p.err != nil { + return 0 + } + var n int64 + n, p.err = strconv.ParseInt(s, base, 64) + return n +} + +func (p *intParser) Err() error { + return p.err +} diff --git a/vendor/github.com/lufia/plan9stats/opts.go b/vendor/github.com/lufia/plan9stats/opts.go new file mode 100644 index 0000000000000..05b7d036a2d50 --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/opts.go @@ -0,0 +1,21 @@ +package stats + +type Config struct { + rootdir string +} + +type Option func(*Config) + +func newConfig(opts ...Option) *Config { + var cfg Config + for _, opt := range opts { + opt(&cfg) + } + return &cfg +} + +func WithRootDir(dir string) Option { + return func(cfg *Config) { + cfg.rootdir = dir + } +} diff --git a/vendor/github.com/lufia/plan9stats/stats.go b/vendor/github.com/lufia/plan9stats/stats.go new file mode 100644 index 0000000000000..d4ecdcfa079c1 --- /dev/null +++ b/vendor/github.com/lufia/plan9stats/stats.go @@ -0,0 +1,88 @@ +package stats + +import ( + "bufio" + "context" + "os" + "path/filepath" + "strings" +) + +type InterfaceStats struct { + PacketsReceived int64 // in packets + Link int // link status + PacketsSent int64 // out packets + NumCRCErr int // input CRC errors + NumOverflows int // packet overflows + NumSoftOverflows int // software overflow + NumFramingErr int // framing errors + NumBufferingErr int // buffering errors + NumOutputErr int // output errors + Promiscuous int // number of promiscuous opens + Mbps int // megabits per sec + Addr string +} + +func ReadInterfaceStats(ctx context.Context, opts ...Option) (*InterfaceStats, error) { + cfg := newConfig(opts...) 
+ file := filepath.Join(cfg.rootdir, "stats") + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + + var stats InterfaceStats + scanner := bufio.NewScanner(f) + for scanner.Scan() { + s := strings.TrimSpace(scanner.Text()) + a := strings.SplitN(s, ":", 2) + if len(a) != 2 { + continue + } + var p intParser + v := strings.TrimSpace(a[1]) + switch a[0] { + case "in": + stats.PacketsReceived = p.ParseInt64(v, 10) + case "link": + stats.Link = p.ParseInt(v, 10) + case "out": + stats.PacketsSent = p.ParseInt64(v, 10) + case "crc": + stats.NumCRCErr = p.ParseInt(v, 10) + case "overflows": + stats.NumOverflows = p.ParseInt(v, 10) + case "soft overflows": + stats.NumSoftOverflows = p.ParseInt(v, 10) + case "framing errs": + stats.NumFramingErr = p.ParseInt(v, 10) + case "buffer errs": + stats.NumBufferingErr = p.ParseInt(v, 10) + case "output errs": + stats.NumOutputErr = p.ParseInt(v, 10) + case "prom": + stats.Promiscuous = p.ParseInt(v, 10) + case "mbps": + stats.Mbps = p.ParseInt(v, 10) + case "addr": + stats.Addr = v + } + if err := p.Err(); err != nil { + return nil, err + } + } + if err := scanner.Err(); err != nil { + return nil, err + } + return &stats, nil +} + +type TCPStats struct { + MaxConn int + MaxSegment int + ActiveOpens int + PassiveOpens int + EstablishedResets int + CurrentEstablished int +} diff --git a/vendor/github.com/power-devops/perfstat/LICENSE b/vendor/github.com/power-devops/perfstat/LICENSE new file mode 100644 index 0000000000000..ec4e5d39d8c47 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/LICENSE @@ -0,0 +1,23 @@ +MIT License + +Copyright (c) 2020 Power DevOps + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
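
For orientation only, a minimal usage sketch of the vendored plan9stats readers shown above (not part of the patched code itself); it assumes a Plan 9 namespace, or a directory tree laid out like one and selected via WithRootDir:

package main

import (
	"context"
	"fmt"
	"log"

	stats "github.com/lufia/plan9stats"
)

func main() {
	ctx := context.Background()

	// Host name, storages and network interfaces, read from /dev and /net.
	host, err := stats.ReadHost(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("sysname:", host.Sysname)

	// Memory counters from /dev/swap; WithRootDir points the readers at an
	// alternate namespace root (useful when testing outside real Plan 9).
	mem, err := stats.ReadMemStats(ctx, stats.WithRootDir("/"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("total memory (bytes):", mem.Total)
}
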
+ + diff --git a/vendor/github.com/power-devops/perfstat/c_helpers.c b/vendor/github.com/power-devops/perfstat/c_helpers.c new file mode 100644 index 0000000000000..49ba1ad7eb6ef --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/c_helpers.c @@ -0,0 +1,159 @@ +#include "c_helpers.h" + +GETFUNC(cpu) +GETFUNC(disk) +GETFUNC(diskadapter) +GETFUNC(diskpath) +GETFUNC(fcstat) +GETFUNC(logicalvolume) +GETFUNC(memory_page) +GETFUNC(netadapter) +GETFUNC(netbuffer) +GETFUNC(netinterface) +GETFUNC(pagingspace) +GETFUNC(process) +GETFUNC(thread) +GETFUNC(volumegroup) + +double get_partition_mhz(perfstat_partition_config_t pinfo) { + return pinfo.processorMHz; +} + +char *get_ps_hostname(perfstat_pagingspace_t *ps) { + return ps->u.nfs_paging.hostname; +} + +char *get_ps_filename(perfstat_pagingspace_t *ps) { + return ps->u.nfs_paging.filename; +} + +char *get_ps_vgname(perfstat_pagingspace_t *ps) { + return ps->u.lv_paging.vgname; +} + +time_t boottime() +{ + register struct utmpx *utmp; + + setutxent(); + while ( (utmp = getutxent()) != NULL ) { + if (utmp->ut_type == BOOT_TIME) { + return utmp->ut_tv.tv_sec; + } + } + endutxent(); + return -1; +} + +struct fsinfo *get_filesystem_stat(struct fsinfo *fs_all, int n) { + if (!fs_all) return NULL; + return &(fs_all[n]); +} + +int get_mounts(struct vmount **vmountpp) { + int size; + struct vmount *vm; + int nmounts; + + size = BUFSIZ; + + while (1) { + if ((vm = (struct vmount *)malloc((size_t)size)) == NULL) { + perror("malloc failed"); + exit(-1); + } + if ((nmounts = mntctl(MCTL_QUERY, size, (caddr_t)vm)) > 0) { + *vmountpp = vm; + return nmounts; + } else if (nmounts == 0) { + size = *(int *)vm; + free((void *)vm); + } else { + free((void *)vm); + return -1; + } + } +} + +void fill_fsinfo(struct statfs statbuf, struct fsinfo *fs) { + fsblkcnt_t freeblks, totblks, usedblks; + fsblkcnt_t tinodes, ninodes, ifree; + uint cfactor; + + if (statbuf.f_blocks == -1) { + fs->totalblks = 0; + fs->freeblks = 0; + fs->totalinodes = 0; + fs->freeinodes = 0; + return; + } + + cfactor = statbuf.f_bsize / 512; + fs->freeblks = statbuf.f_bavail * cfactor; + fs->totalblks = statbuf.f_blocks * cfactor; + + fs->freeinodes = statbuf.f_ffree; + fs->totalinodes = statbuf.f_files; + + if (fs->freeblks < 0) + fs->freeblks = 0; +} + +int getfsinfo(char *fsname, char *devname, char *host, char *options, int flags, int fstype, struct fsinfo *fs) { + struct statfs statbuf; + int devname_size = strlen(devname); + int fsname_size = strlen(fsname); + char buf[BUFSIZ]; + char *p; + + if (fs == NULL) { + return 1; + } + + for (p = strtok(options, ","); p != NULL; p = strtok(NULL, ",")) + if (strcmp(p, "ignore") == 0) + return 0; + + if (*host != 0 && strcmp(host, "-") != 0) { + sprintf(buf, "%s:%s", host, devname); + devname = buf; + } + fs->devname = (char *)calloc(devname_size+1, 1); + fs->fsname = (char *)calloc(fsname_size+1, 1); + strncpy(fs->devname, devname, devname_size); + strncpy(fs->fsname, fsname, fsname_size); + fs->flags = flags; + fs->fstype = fstype; + + if (statfs(fsname,&statbuf) < 0) { + return 1; + } + + fill_fsinfo(statbuf, fs); + return 0; +} + +struct fsinfo *get_all_fs(int *rc) { + struct vmount *mnt; + struct fsinfo *fs_all; + int nmounts; + + *rc = -1; + if ((nmounts = get_mounts(&mnt)) <= 0) { + perror("Can't get mount table info"); + return NULL; + } + + fs_all = (struct fsinfo *)calloc(sizeof(struct fsinfo), nmounts); + while ((*rc)++, nmounts--) { + getfsinfo(vmt2dataptr(mnt, VMT_STUB), + vmt2dataptr(mnt, VMT_OBJECT), + vmt2dataptr(mnt, VMT_HOST), 
+ vmt2dataptr(mnt, VMT_ARGS), + mnt->vmt_flags, + mnt->vmt_gfstype, + &fs_all[*rc]); + mnt = (struct vmount *)((char *)mnt + mnt->vmt_length); + } + return fs_all; +} diff --git a/vendor/github.com/power-devops/perfstat/c_helpers.h b/vendor/github.com/power-devops/perfstat/c_helpers.h new file mode 100644 index 0000000000000..b66bc53c3c189 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/c_helpers.h @@ -0,0 +1,58 @@ +#ifndef C_HELPERS_H +#define C_HELPERS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define GETFUNC(TYPE) perfstat_##TYPE##_t *get_##TYPE##_stat(perfstat_##TYPE##_t *b, int n) { \ + if (!b) return NULL; \ + return &(b[n]); \ +} + +#define GETFUNC_EXT(TYPE) extern perfstat_##TYPE##_t *get_##TYPE##_stat(perfstat_##TYPE##_t *, int); + +GETFUNC_EXT(cpu) +GETFUNC_EXT(disk) +GETFUNC_EXT(diskadapter) +GETFUNC_EXT(diskpath) +GETFUNC_EXT(fcstat) +GETFUNC_EXT(logicalvolume) +GETFUNC_EXT(memory_page) +GETFUNC_EXT(netadapter) +GETFUNC_EXT(netbuffer) +GETFUNC_EXT(netinterface) +GETFUNC_EXT(pagingspace) +GETFUNC_EXT(process) +GETFUNC_EXT(thread) +GETFUNC_EXT(volumegroup) + +struct fsinfo { + char *devname; + char *fsname; + int flags; + int fstype; + unsigned long totalblks; + unsigned long freeblks; + unsigned long totalinodes; + unsigned long freeinodes; +}; + +extern double get_partition_mhz(perfstat_partition_config_t); +extern char *get_ps_hostname(perfstat_pagingspace_t *); +extern char *get_ps_filename(perfstat_pagingspace_t *); +extern char *get_ps_vgname(perfstat_pagingspace_t *); +extern time_t boottime(); +struct fsinfo *get_filesystem_stat(struct fsinfo *, int); +int get_mounts(struct vmount **); +void fill_statfs(struct statfs, struct fsinfo *); +int getfsinfo(char *, char *, char *, char *, int, int, struct fsinfo *); +struct fsinfo *get_all_fs(int *); + +#endif diff --git a/vendor/github.com/power-devops/perfstat/config.go b/vendor/github.com/power-devops/perfstat/config.go new file mode 100644 index 0000000000000..de7230d28c0fb --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/config.go @@ -0,0 +1,18 @@ +// +build aix + +package perfstat + +/* +#cgo LDFLAGS: -lperfstat + +#include +*/ +import "C" + +func EnableLVMStat() { + C.perfstat_config(C.PERFSTAT_ENABLE|C.PERFSTAT_LV|C.PERFSTAT_VG, nil) +} + +func DisableLVMStat() { + C.perfstat_config(C.PERFSTAT_DISABLE|C.PERFSTAT_LV|C.PERFSTAT_VG, nil) +} diff --git a/vendor/github.com/power-devops/perfstat/cpustat.go b/vendor/github.com/power-devops/perfstat/cpustat.go new file mode 100644 index 0000000000000..902727fb8f748 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/cpustat.go @@ -0,0 +1,98 @@ +// +build aix + +package perfstat + +/* +#cgo LDFLAGS: -lperfstat + +#include +#include +#include + +#include "c_helpers.h" +*/ +import "C" + +import ( + "fmt" + "runtime" + "time" + "unsafe" +) + +func CpuStat() ([]CPU, error) { + var cpustat *C.perfstat_cpu_t + var cpu C.perfstat_id_t + + ncpu := runtime.NumCPU() + + cpustat_len := C.sizeof_perfstat_cpu_t * C.ulong(ncpu) + cpustat = (*C.perfstat_cpu_t)(C.malloc(cpustat_len)) + defer C.free(unsafe.Pointer(cpustat)) + C.strcpy(&cpu.name[0], C.CString(C.FIRST_CPU)) + r := C.perfstat_cpu(&cpu, cpustat, C.sizeof_perfstat_cpu_t, C.int(ncpu)) + if r <= 0 { + return nil, fmt.Errorf("error perfstat_cpu()") + } + c := make([]CPU, r) + for i := 0; i < int(r); i++ { + n := C.get_cpu_stat(cpustat, C.int(i)) + if n != nil { + c[i] = perfstatcpu2cpu(n) + } + } + return c, nil +} + +func CpuTotalStat() (*CPUTotal, 
error) { + var cpustat *C.perfstat_cpu_total_t + + cpustat = (*C.perfstat_cpu_total_t)(C.malloc(C.sizeof_perfstat_cpu_total_t)) + defer C.free(unsafe.Pointer(cpustat)) + r := C.perfstat_cpu_total(nil, cpustat, C.sizeof_perfstat_cpu_total_t, 1) + if r <= 0 { + return nil, fmt.Errorf("error perfstat_cpu_total()") + } + c := perfstatcputotal2cputotal(cpustat) + return &c, nil +} + +func CpuUtilStat(intvl time.Duration) (*CPUUtil, error) { + var cpuutil *C.perfstat_cpu_util_t + var newt *C.perfstat_cpu_total_t + var oldt *C.perfstat_cpu_total_t + var data C.perfstat_rawdata_t + + oldt = (*C.perfstat_cpu_total_t)(C.malloc(C.sizeof_perfstat_cpu_total_t)) + newt = (*C.perfstat_cpu_total_t)(C.malloc(C.sizeof_perfstat_cpu_total_t)) + cpuutil = (*C.perfstat_cpu_util_t)(C.malloc(C.sizeof_perfstat_cpu_util_t)) + defer C.free(unsafe.Pointer(oldt)) + defer C.free(unsafe.Pointer(newt)) + defer C.free(unsafe.Pointer(cpuutil)) + + r := C.perfstat_cpu_total(nil, oldt, C.sizeof_perfstat_cpu_total_t, 1) + if r <= 0 { + return nil, fmt.Errorf("error perfstat_cpu_total()") + } + + time.Sleep(intvl) + + r = C.perfstat_cpu_total(nil, newt, C.sizeof_perfstat_cpu_total_t, 1) + if r <= 0 { + return nil, fmt.Errorf("error perfstat_cpu_total()") + } + + data._type = C.UTIL_CPU_TOTAL + data.curstat = unsafe.Pointer(newt) + data.prevstat = unsafe.Pointer(oldt) + data.sizeof_data = C.sizeof_perfstat_cpu_total_t + data.cur_elems = 1 + data.prev_elems = 1 + + r = C.perfstat_cpu_util(&data, cpuutil, C.sizeof_perfstat_cpu_util_t, 1) + if r <= 0 { + return nil, fmt.Errorf("error perfstat_cpu_util()") + } + u := perfstatcpuutil2cpuutil(cpuutil) + return &u, nil +} diff --git a/vendor/github.com/power-devops/perfstat/diskstat.go b/vendor/github.com/power-devops/perfstat/diskstat.go new file mode 100644 index 0000000000000..fc70dfaa4e111 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/diskstat.go @@ -0,0 +1,137 @@ +// +build aix + +package perfstat + +/* +#cgo LDFLAGS: -lperfstat + +#include +#include +#include +#include "c_helpers.h" +*/ +import "C" + +import ( + "fmt" + "unsafe" +) + +func DiskTotalStat() (*DiskTotal, error) { + var disk C.perfstat_disk_total_t + + rc := C.perfstat_disk_total(nil, &disk, C.sizeof_perfstat_disk_total_t, 1) + if rc != 1 { + return nil, fmt.Errorf("perfstat_disk_total() error") + } + d := perfstatdisktotal2disktotal(disk) + return &d, nil +} + +func DiskAdapterStat() ([]DiskAdapter, error) { + var adapter *C.perfstat_diskadapter_t + var adptname C.perfstat_id_t + + numadpt := C.perfstat_diskadapter(nil, nil, C.sizeof_perfstat_diskadapter_t, 0) + if numadpt <= 0 { + return nil, fmt.Errorf("perfstat_diskadapter() error") + } + + adapter_len := C.sizeof_perfstat_diskadapter_t * C.ulong(numadpt) + adapter = (*C.perfstat_diskadapter_t)(C.malloc(adapter_len)) + defer C.free(unsafe.Pointer(adapter)) + C.strcpy(&adptname.name[0], C.CString(C.FIRST_DISKADAPTER)) + r := C.perfstat_diskadapter(&adptname, adapter, C.sizeof_perfstat_diskadapter_t, numadpt) + if r < 0 { + return nil, fmt.Errorf("perfstat_diskadapter() error") + } + da := make([]DiskAdapter, r) + for i := 0; i < int(r); i++ { + d := C.get_diskadapter_stat(adapter, C.int(i)) + if d != nil { + da[i] = perfstatdiskadapter2diskadapter(d) + } + } + return da, nil +} + +func DiskStat() ([]Disk, error) { + var disk *C.perfstat_disk_t + var diskname C.perfstat_id_t + + numdisk := C.perfstat_disk(nil, nil, C.sizeof_perfstat_disk_t, 0) + if numdisk <= 0 { + return nil, fmt.Errorf("perfstat_disk() error") + } + + disk_len := 
C.sizeof_perfstat_disk_t * C.ulong(numdisk) + disk = (*C.perfstat_disk_t)(C.malloc(disk_len)) + defer C.free(unsafe.Pointer(disk)) + C.strcpy(&diskname.name[0], C.CString(C.FIRST_DISK)) + r := C.perfstat_disk(&diskname, disk, C.sizeof_perfstat_disk_t, numdisk) + if r < 0 { + return nil, fmt.Errorf("perfstat_disk() error") + } + d := make([]Disk, r) + for i := 0; i < int(r); i++ { + ds := C.get_disk_stat(disk, C.int(i)) + if ds != nil { + d[i] = perfstatdisk2disk(ds) + } + } + return d, nil +} + +func DiskPathStat() ([]DiskPath, error) { + var diskpath *C.perfstat_diskpath_t + var pathname C.perfstat_id_t + + numpaths := C.perfstat_diskpath(nil, nil, C.sizeof_perfstat_diskpath_t, 0) + if numpaths <= 0 { + return nil, fmt.Errorf("perfstat_diskpath() error") + } + + path_len := C.sizeof_perfstat_diskpath_t * C.ulong(numpaths) + diskpath = (*C.perfstat_diskpath_t)(C.malloc(path_len)) + defer C.free(unsafe.Pointer(diskpath)) + C.strcpy(&pathname.name[0], C.CString(C.FIRST_DISKPATH)) + r := C.perfstat_diskpath(&pathname, diskpath, C.sizeof_perfstat_diskpath_t, numpaths) + if r < 0 { + return nil, fmt.Errorf("perfstat_diskpath() error") + } + d := make([]DiskPath, r) + for i := 0; i < int(r); i++ { + p := C.get_diskpath_stat(diskpath, C.int(i)) + if p != nil { + d[i] = perfstatdiskpath2diskpath(p) + } + } + return d, nil +} + +func FCAdapterStat() ([]FCAdapter, error) { + var fcstat *C.perfstat_fcstat_t + var fcname C.perfstat_id_t + + numadpt := C.perfstat_fcstat(nil, nil, C.sizeof_perfstat_fcstat_t, 0) + if numadpt <= 0 { + return nil, fmt.Errorf("perfstat_fcstat() error") + } + + fcstat_len := C.sizeof_perfstat_fcstat_t * C.ulong(numadpt) + fcstat = (*C.perfstat_fcstat_t)(C.malloc(fcstat_len)) + defer C.free(unsafe.Pointer(fcstat)) + C.strcpy(&fcname.name[0], C.CString(C.FIRST_NETINTERFACE)) + r := C.perfstat_fcstat(&fcname, fcstat, C.sizeof_perfstat_fcstat_t, numadpt) + if r < 0 { + return nil, fmt.Errorf("perfstat_fcstat() error") + } + fca := make([]FCAdapter, r) + for i := 0; i < int(r); i++ { + f := C.get_fcstat_stat(fcstat, C.int(i)) + if f != nil { + fca[i] = perfstatfcstat2fcadapter(f) + } + } + return fca, nil +} diff --git a/vendor/github.com/power-devops/perfstat/doc.go b/vendor/github.com/power-devops/perfstat/doc.go new file mode 100644 index 0000000000000..85eaf3e7eda36 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/doc.go @@ -0,0 +1,315 @@ +// +build !aix + +// Copyright 2020 Power-Devops.com. All rights reserved. +// Use of this source code is governed by the license +// that can be found in the LICENSE file. +/* +Package perfstat is Go interface to IBM AIX libperfstat. +To use it you need AIX with installed bos.perf.libperfstat. You can check, if is installed using the following command: + + $ lslpp -L bos.perf.perfstat + +The package is written using Go 1.14.7 and AIX 7.2 TL5. It should work with earlier TLs of AIX 7.2, but I +can't guarantee that perfstat structures in the TLs have all the same fields as the structures in AIX 7.2 TL5. + +For documentation of perfstat on AIX and using it in programs refer to the official IBM documentation: +https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat.html +*/ +package perfstat + +import ( + "fmt" + "time" +) + +// EnableLVMStat() switches on LVM (logical volumes and volume groups) performance statistics. +// With this enabled you can use fields KBReads, KBWrites, and IOCnt +// in LogicalVolume and VolumeGroup data types. 
+func EnableLVMStat() {} + +// DisableLVMStat() switchess of LVM (logical volumes and volume groups) performance statistics. +// This is the default state. In this case LogicalVolume and VolumeGroup data types are +// populated with informations about LVM structures, but performance statistics fields +// (KBReads, KBWrites, IOCnt) are empty. +func DisableLVMStat() {} + +// CpuStat() returns array of CPU structures with information about +// logical CPUs on the system. +// IBM documentation: +// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_int_cpu.html +// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu.html +func CpuStat() ([]CPU, error) { + return nil, fmt.Errorf("not implemented") +} + +// CpuTotalStat() returns general information about CPUs on the system. +// IBM documentation: +// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_glob_cpu.html +// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cputot.html +func CpuTotalStat() (*CPUTotal, error) { + return nil, fmt.Errorf("not implemented") +} + +// CpuUtilStat() calculates CPU utilization. +// IBM documentation: +// * https://www.ibm.com/support/knowledgecenter/ssw_aix_72/performancetools/idprftools_perfstat_cpu_util.html +// * https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/p_bostechref/perfstat_cpu_util.html +func CpuUtilStat(intvl time.Duration) (*CPUUtil, error) { + return nil, fmt.Errorf("not implemented") +} + +func DiskTotalStat() (*DiskTotal, error) { + return nil, fmt.Errorf("not implemented") +} + +func DiskAdapterStat() ([]DiskAdapter, error) { + return nil, fmt.Errorf("not implemented") +} + +func DiskStat() ([]Disk, error) { + return nil, fmt.Errorf("not implemented") +} + +func DiskPathStat() ([]DiskPath, error) { + return nil, fmt.Errorf("not implemented") +} + +func FCAdapterStat() ([]FCAdapter, error) { + return nil, fmt.Errorf("not implemented") +} + +func PartitionStat() (*PartitionConfig, error) { + return nil, fmt.Errorf("not implemented") +} + +func LogicalVolumeStat() ([]LogicalVolume, error) { + return nil, fmt.Errorf("not implemented") +} + +func VolumeGroupStat() ([]VolumeGroup, error) { + return nil, fmt.Errorf("not implemented") +} + +func MemoryTotalStat() (*MemoryTotal, error) { + return nil, fmt.Errorf("not implemented") +} + +func MemoryPageStat() ([]MemoryPage, error) { + return nil, fmt.Errorf("not implemented") +} + +func PagingSpaceStat() ([]PagingSpace, error) { + return nil, fmt.Errorf("not implemented") +} + +func NetIfaceTotalStat() (*NetIfaceTotal, error) { + return nil, fmt.Errorf("not implemented") +} + +func NetBufferStat() ([]NetBuffer, error) { + return nil, fmt.Errorf("not implemented") +} + +func NetIfaceStat() ([]NetIface, error) { + return nil, fmt.Errorf("not implemented") +} + +func NetAdapterStat() ([]NetAdapter, error) { + return nil, fmt.Errorf("not implemented") +} + +func ProcessStat() ([]Process, error) { + return nil, fmt.Errorf("not implemented") +} + +func ThreadStat() ([]Thread, error) { + return nil, fmt.Errorf("not implemented") +} + +func Sysconf(name int32) (int64, error) { + return 0, fmt.Errorf("not implemented") +} + +func GetCPUImplementation() string { + return "" +} + +func POWER9OrNewer() bool { + return false +} + +func POWER9() bool { + return false +} + +func POWER8OrNewer() bool { + return false +} + +func POWER8() bool { + return false +} + +func POWER7OrNewer() bool { + return false 
+} + +func POWER7() bool { + return false +} + +func HasTransactionalMemory() bool { + return false +} + +func Is64Bit() bool { + return false +} + +func IsSMP() bool { + return false +} + +func HasVMX() bool { + return false +} + +func HasVSX() bool { + return false +} + +func HasDFP() bool { + return false +} + +func HasNxGzip() bool { + return false +} + +func PksCapable() bool { + return false +} + +func PksEnabled() bool { + return false +} + +func CPUMode() string { + return "" +} + +func KernelBits() int { + return 0 +} + +func IsLPAR() bool { + return false +} + +func CpuAddCapable() bool { + return false +} + +func CpuRemoveCapable() bool { + return false +} + +func MemoryAddCapable() bool { + return false +} + +func MemoryRemoveCapable() bool { + return false +} + +func DLparCapable() bool { + return false +} + +func IsNUMA() bool { + return false +} + +func KernelKeys() bool { + return false +} + +func RecoveryMode() bool { + return false +} + +func EnhancedAffinity() bool { + return false +} + +func VTpmEnabled() bool { + return false +} + +func IsVIOS() bool { + return false +} + +func MLSEnabled() bool { + return false +} + +func SPLparCapable() bool { + return false +} + +func SPLparEnabled() bool { + return false +} + +func DedicatedLpar() bool { + return false +} + +func SPLparCapped() bool { + return false +} + +func SPLparDonating() bool { + return false +} + +func SmtCapable() bool { + return false +} + +func SmtEnabled() bool { + return false +} + +func VrmCapable() bool { + return false +} + +func VrmEnabled() bool { + return false +} + +func AmeEnabled() bool { + return false +} + +func EcoCapable() bool { + return false +} + +func EcoEnabled() bool { + return false +} + +func BootTime() (uint64, error) { + return 0, fmt.Errorf("Not implemented") +} + +func UptimeSeconds() (uint64, error) { + return 0, fmt.Errorf("Not implemented") +} + +func FileSystemStat() ([]FileSystem, error) { + return nil, fmt.Errorf("Not implemented") +} diff --git a/vendor/github.com/power-devops/perfstat/fsstat.go b/vendor/github.com/power-devops/perfstat/fsstat.go new file mode 100644 index 0000000000000..27f4c06c158f7 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/fsstat.go @@ -0,0 +1,31 @@ +// +build aix + +package perfstat + +/* +#include "c_helpers.h" +*/ +import "C" + +import ( + "fmt" +) + +func FileSystemStat() ([]FileSystem, error) { + var fsinfo *C.struct_fsinfo + var nmounts C.int + + fsinfo = C.get_all_fs(&nmounts) + if nmounts <= 0 { + return nil, fmt.Errorf("No mounts found") + } + + fs := make([]FileSystem, nmounts) + for i := 0; i < int(nmounts); i++ { + f := C.get_filesystem_stat(fsinfo, C.int(i)) + if f != nil { + fs[i] = fsinfo2filesystem(f) + } + } + return fs, nil +} diff --git a/vendor/github.com/power-devops/perfstat/helpers.go b/vendor/github.com/power-devops/perfstat/helpers.go new file mode 100644 index 0000000000000..e8d6997665ef0 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/helpers.go @@ -0,0 +1,764 @@ +// +build aix + +package perfstat + +/* +#cgo LDFLAGS: -lperfstat + +#include +#include + +#include "c_helpers.h" +*/ +import "C" + +func perfstatcpu2cpu(n *C.perfstat_cpu_t) CPU { + var c CPU + c.Name = C.GoString(&n.name[0]) + c.User = int64(n.user) + c.Sys = int64(n.sys) + c.Idle = int64(n.idle) + c.Wait = int64(n.wait) + c.PSwitch = int64(n.pswitch) + c.Syscall = int64(n.syscall) + c.Sysread = int64(n.sysread) + c.Syswrite = int64(n.syswrite) + c.Sysfork = int64(n.sysfork) + c.Sysexec = int64(n.sysexec) + c.Readch = int64(n.readch) 
+ c.Writech = int64(n.writech) + c.Bread = int64(n.bread) + c.Bwrite = int64(n.bwrite) + c.Lread = int64(n.lread) + c.Lwrite = int64(n.lwrite) + c.Phread = int64(n.phread) + c.Phwrite = int64(n.phwrite) + c.Iget = int64(n.iget) + c.Namei = int64(n.namei) + c.Dirblk = int64(n.dirblk) + c.Msg = int64(n.msg) + c.Sema = int64(n.sema) + c.MinFaults = int64(n.minfaults) + c.MajFaults = int64(n.majfaults) + c.PUser = int64(n.puser) + c.PSys = int64(n.psys) + c.PIdle = int64(n.pidle) + c.PWait = int64(n.pwait) + c.RedispSD0 = int64(n.redisp_sd0) + c.RedispSD1 = int64(n.redisp_sd1) + c.RedispSD2 = int64(n.redisp_sd2) + c.RedispSD3 = int64(n.redisp_sd3) + c.RedispSD4 = int64(n.redisp_sd4) + c.RedispSD5 = int64(n.redisp_sd5) + c.MigrationPush = int64(n.migration_push) + c.MigrationS3grq = int64(n.migration_S3grq) + c.MigrationS3pul = int64(n.migration_S3pul) + c.InvolCSwitch = int64(n.invol_cswitch) + c.VolCSwitch = int64(n.vol_cswitch) + c.RunQueue = int64(n.runque) + c.Bound = int64(n.bound) + c.DecrIntrs = int64(n.decrintrs) + c.MpcRIntrs = int64(n.mpcrintrs) + c.MpcSIntrs = int64(n.mpcsintrs) + c.SoftIntrs = int64(n.softintrs) + c.DevIntrs = int64(n.devintrs) + c.PhantIntrs = int64(n.phantintrs) + c.IdleDonatedPurr = int64(n.idle_donated_purr) + c.IdleDonatedSpurr = int64(n.idle_donated_spurr) + c.BusyDonatedPurr = int64(n.busy_donated_purr) + c.BusyDonatedSpurr = int64(n.busy_donated_spurr) + c.IdleStolenPurr = int64(n.idle_stolen_purr) + c.IdleStolenSpurr = int64(n.idle_stolen_spurr) + c.BusyStolenPurr = int64(n.busy_stolen_purr) + c.BusyStolenSpurr = int64(n.busy_stolen_spurr) + c.Hpi = int64(n.hpi) + c.Hpit = int64(n.hpit) + c.PUserSpurr = int64(n.puser_spurr) + c.PSysSpurr = int64(n.psys_spurr) + c.PIdleSpurr = int64(n.pidle_spurr) + c.PWaitSpurr = int64(n.pwait_spurr) + c.SpurrFlag = int32(n.spurrflag) + c.LocalDispatch = int64(n.localdispatch) + c.NearDispatch = int64(n.neardispatch) + c.FarDispatch = int64(n.fardispatch) + c.CSwitches = int64(n.cswitches) + c.Version = int64(n.version) + c.TbLast = int64(n.tb_last) + c.State = int(n.state) + c.VtbLast = int64(n.vtb_last) + c.ICountLast = int64(n.icount_last) + return c +} + +func perfstatcputotal2cputotal(n *C.perfstat_cpu_total_t) CPUTotal { + var c CPUTotal + c.NCpus = int(n.ncpus) + c.NCpusCfg = int(n.ncpus_cfg) + c.Description = C.GoString(&n.description[0]) + c.ProcessorHz = int64(n.processorHZ) + c.User = int64(n.user) + c.Sys = int64(n.sys) + c.Idle = int64(n.idle) + c.Wait = int64(n.wait) + c.PSwitch = int64(n.pswitch) + c.Syscall = int64(n.syscall) + c.Sysread = int64(n.sysread) + c.Syswrite = int64(n.syswrite) + c.Sysfork = int64(n.sysfork) + c.Sysexec = int64(n.sysexec) + c.Readch = int64(n.readch) + c.Writech = int64(n.writech) + c.DevIntrs = int64(n.devintrs) + c.SoftIntrs = int64(n.softintrs) + c.Lbolt = int64(n.lbolt) + c.LoadAvg1 = (float32(n.loadavg[0]) / (1 << C.SBITS)) + c.LoadAvg5 = (float32(n.loadavg[1]) / (1 << C.SBITS)) + c.LoadAvg15 = (float32(n.loadavg[2]) / (1 << C.SBITS)) + c.RunQueue = int64(n.runque) + c.SwpQueue = int64(n.swpque) + c.Bread = int64(n.bread) + c.Bwrite = int64(n.bwrite) + c.Lread = int64(n.lread) + c.Lwrite = int64(n.lwrite) + c.Phread = int64(n.phread) + c.Phwrite = int64(n.phwrite) + c.RunOcc = int64(n.runocc) + c.SwpOcc = int64(n.swpocc) + c.Iget = int64(n.iget) + c.Namei = int64(n.namei) + c.Dirblk = int64(n.dirblk) + c.Msg = int64(n.msg) + c.Sema = int64(n.sema) + c.RcvInt = int64(n.rcvint) + c.XmtInt = int64(n.xmtint) + c.MdmInt = int64(n.mdmint) + c.TtyRawInch = int64(n.tty_rawinch) + 
c.TtyCanInch = int64(n.tty_caninch) + c.TtyRawOutch = int64(n.tty_rawoutch) + c.Ksched = int64(n.ksched) + c.Koverf = int64(n.koverf) + c.Kexit = int64(n.kexit) + c.Rbread = int64(n.rbread) + c.Rcread = int64(n.rcread) + c.Rbwrt = int64(n.rbwrt) + c.Rcwrt = int64(n.rcwrt) + c.Traps = int64(n.traps) + c.NCpusHigh = int64(n.ncpus_high) + c.PUser = int64(n.puser) + c.PSys = int64(n.psys) + c.PIdle = int64(n.pidle) + c.PWait = int64(n.pwait) + c.DecrIntrs = int64(n.decrintrs) + c.MpcRIntrs = int64(n.mpcrintrs) + c.MpcSIntrs = int64(n.mpcsintrs) + c.PhantIntrs = int64(n.phantintrs) + c.IdleDonatedPurr = int64(n.idle_donated_purr) + c.IdleDonatedSpurr = int64(n.idle_donated_spurr) + c.BusyDonatedPurr = int64(n.busy_donated_purr) + c.BusyDonatedSpurr = int64(n.busy_donated_spurr) + c.IdleStolenPurr = int64(n.idle_stolen_purr) + c.IdleStolenSpurr = int64(n.idle_stolen_spurr) + c.BusyStolenPurr = int64(n.busy_stolen_purr) + c.BusyStolenSpurr = int64(n.busy_stolen_spurr) + c.IOWait = int32(n.iowait) + c.PhysIO = int32(n.physio) + c.TWait = int64(n.twait) + c.Hpi = int64(n.hpi) + c.Hpit = int64(n.hpit) + c.PUserSpurr = int64(n.puser_spurr) + c.PSysSpurr = int64(n.psys_spurr) + c.PIdleSpurr = int64(n.pidle_spurr) + c.PWaitSpurr = int64(n.pwait_spurr) + c.SpurrFlag = int(n.spurrflag) + c.Version = int64(n.version) + c.TbLast = int64(n.tb_last) + c.PurrCoalescing = int64(n.purr_coalescing) + c.SpurrCoalescing = int64(n.spurr_coalescing) + return c +} + +func perfstatcpuutil2cpuutil(n *C.perfstat_cpu_util_t) CPUUtil { + var c CPUUtil + + c.Version = int64(n.version) + c.CpuID = C.GoString(&n.cpu_id[0]) + c.Entitlement = float32(n.entitlement) + c.UserPct = float32(n.user_pct) + c.KernPct = float32(n.kern_pct) + c.IdlePct = float32(n.idle_pct) + c.WaitPct = float32(n.wait_pct) + c.PhysicalBusy = float32(n.physical_busy) + c.PhysicalConsumed = float32(n.physical_consumed) + c.FreqPct = float32(n.freq_pct) + c.EntitlementPct = float32(n.entitlement_pct) + c.BusyPct = float32(n.busy_pct) + c.IdleDonatedPct = float32(n.idle_donated_pct) + c.BusyDonatedPct = float32(n.busy_donated_pct) + c.IdleStolenPct = float32(n.idle_stolen_pct) + c.BusyStolenPct = float32(n.busy_stolen_pct) + c.LUserPct = float32(n.l_user_pct) + c.LKernPct = float32(n.l_kern_pct) + c.LIdlePct = float32(n.l_idle_pct) + c.LWaitPct = float32(n.l_wait_pct) + c.DeltaTime = int64(n.delta_time) + + return c +} + +func perfstatdisktotal2disktotal(n C.perfstat_disk_total_t) DiskTotal { + var d DiskTotal + + d.Number = int32(n.number) + d.Size = int64(n.size) + d.Free = int64(n.free) + d.XRate = int64(n.xrate) + d.Xfers = int64(n.xfers) + d.Wblks = int64(n.wblks) + d.Rblks = int64(n.rblks) + d.Time = int64(n.time) + d.Version = int64(n.version) + d.Rserv = int64(n.rserv) + d.MinRserv = int64(n.min_rserv) + d.MaxRserv = int64(n.max_rserv) + d.RTimeOut = int64(n.rtimeout) + d.RFailed = int64(n.rfailed) + d.Wserv = int64(n.wserv) + d.MinWserv = int64(n.min_wserv) + d.MaxWserv = int64(n.max_wserv) + d.WTimeOut = int64(n.wtimeout) + d.WFailed = int64(n.wfailed) + d.WqDepth = int64(n.wq_depth) + d.WqTime = int64(n.wq_time) + d.WqMinTime = int64(n.wq_min_time) + d.WqMaxTime = int64(n.wq_max_time) + + return d +} + +func perfstatdiskadapter2diskadapter(n *C.perfstat_diskadapter_t) DiskAdapter { + var d DiskAdapter + + d.Name = C.GoString(&n.name[0]) + d.Description = C.GoString(&n.description[0]) + d.Number = int32(n.number) + d.Size = int64(n.size) + d.Free = int64(n.free) + d.XRate = int64(n.xrate) + d.Xfers = int64(n.xfers) + d.Rblks = int64(n.rblks) + 
d.Wblks = int64(n.wblks) + d.Time = int64(n.time) + d.Version = int64(n.version) + d.AdapterType = int64(n.adapter_type) + d.DkBSize = int64(n.dk_bsize) + d.DkRserv = int64(n.dk_rserv) + d.DkWserv = int64(n.dk_wserv) + d.MinRserv = int64(n.min_rserv) + d.MaxRserv = int64(n.max_rserv) + d.MinWserv = int64(n.min_wserv) + d.MaxWserv = int64(n.max_wserv) + d.WqDepth = int64(n.wq_depth) + d.WqSampled = int64(n.wq_sampled) + d.WqTime = int64(n.wq_time) + d.WqMinTime = int64(n.wq_min_time) + d.WqMaxTime = int64(n.wq_max_time) + d.QFull = int64(n.q_full) + d.QSampled = int64(n.q_sampled) + + return d +} + +func perfstatpartitionconfig2partitionconfig(n C.perfstat_partition_config_t) PartitionConfig { + var p PartitionConfig + p.Version = int64(n.version) + p.Name = C.GoString(&n.partitionname[0]) + p.Node = C.GoString(&n.nodename[0]) + p.Conf.SmtCapable = (n.conf[0] & (1 << 7)) > 0 + p.Conf.SmtEnabled = (n.conf[0] & (1 << 6)) > 0 + p.Conf.LparCapable = (n.conf[0] & (1 << 5)) > 0 + p.Conf.LparEnabled = (n.conf[0] & (1 << 4)) > 0 + p.Conf.SharedCapable = (n.conf[0] & (1 << 3)) > 0 + p.Conf.SharedEnabled = (n.conf[0] & (1 << 2)) > 0 + p.Conf.DLparCapable = (n.conf[0] & (1 << 1)) > 0 + p.Conf.Capped = (n.conf[0] & (1 << 0)) > 0 + p.Conf.Kernel64bit = (n.conf[1] & (1 << 7)) > 0 + p.Conf.PoolUtilAuthority = (n.conf[1] & (1 << 6)) > 0 + p.Conf.DonateCapable = (n.conf[1] & (1 << 5)) > 0 + p.Conf.DonateEnabled = (n.conf[1] & (1 << 4)) > 0 + p.Conf.AmsCapable = (n.conf[1] & (1 << 3)) > 0 + p.Conf.AmsEnabled = (n.conf[1] & (1 << 2)) > 0 + p.Conf.PowerSave = (n.conf[1] & (1 << 1)) > 0 + p.Conf.AmeEnabled = (n.conf[1] & (1 << 0)) > 0 + p.Conf.SharedExtended = (n.conf[2] & (1 << 7)) > 0 + p.Number = int32(n.partitionnum) + p.GroupID = int32(n.groupid) + p.ProcessorFamily = C.GoString(&n.processorFamily[0]) + p.ProcessorModel = C.GoString(&n.processorModel[0]) + p.MachineID = C.GoString(&n.machineID[0]) + p.ProcessorMhz = float64(C.get_partition_mhz(n)) + p.NumProcessors.Online = int64(n.numProcessors.online) + p.NumProcessors.Max = int64(n.numProcessors.max) + p.NumProcessors.Min = int64(n.numProcessors.min) + p.NumProcessors.Desired = int64(n.numProcessors.desired) + p.OSName = C.GoString(&n.OSName[0]) + p.OSVersion = C.GoString(&n.OSVersion[0]) + p.OSBuild = C.GoString(&n.OSBuild[0]) + p.LCpus = int32(n.lcpus) + p.SmtThreads = int32(n.smtthreads) + p.Drives = int32(n.drives) + p.NetworkAdapters = int32(n.nw_adapters) + p.CpuCap.Online = int64(n.cpucap.online) + p.CpuCap.Max = int64(n.cpucap.max) + p.CpuCap.Min = int64(n.cpucap.min) + p.CpuCap.Desired = int64(n.cpucap.desired) + p.Weightage = int32(n.cpucap_weightage) + p.EntCapacity = int32(n.entitled_proc_capacity) + p.VCpus.Online = int64(n.vcpus.online) + p.VCpus.Max = int64(n.vcpus.max) + p.VCpus.Min = int64(n.vcpus.min) + p.VCpus.Desired = int64(n.vcpus.desired) + p.PoolID = int32(n.processor_poolid) + p.ActiveCpusInPool = int32(n.activecpusinpool) + p.PoolWeightage = int32(n.cpupool_weightage) + p.SharedPCpu = int32(n.sharedpcpu) + p.MaxPoolCap = int32(n.maxpoolcap) + p.EntPoolCap = int32(n.entpoolcap) + p.Mem.Online = int64(n.mem.online) + p.Mem.Max = int64(n.mem.max) + p.Mem.Min = int64(n.mem.min) + p.Mem.Desired = int64(n.mem.desired) + p.MemWeightage = int32(n.mem_weightage) + p.TotalIOMemoryEntitlement = int64(n.totiomement) + p.MemPoolID = int32(n.mempoolid) + p.HyperPgSize = int64(n.hyperpgsize) + p.ExpMem.Online = int64(n.exp_mem.online) + p.ExpMem.Max = int64(n.exp_mem.max) + p.ExpMem.Min = int64(n.exp_mem.min) + p.ExpMem.Desired = 
int64(n.exp_mem.desired) + p.TargetMemExpFactor = int64(n.targetmemexpfactor) + p.TargetMemExpSize = int64(n.targetmemexpsize) + p.SubProcessorMode = int32(n.subprocessor_mode) + return p +} + +func perfstatmemorytotal2memorytotal(n C.perfstat_memory_total_t) MemoryTotal { + var m MemoryTotal + m.VirtualTotal = int64(n.virt_total) + m.RealTotal = int64(n.real_total) + m.RealFree = int64(n.real_free) + m.RealPinned = int64(n.real_pinned) + m.RealInUse = int64(n.real_inuse) + m.BadPages = int64(n.pgbad) + m.PageFaults = int64(n.pgexct) + m.PageIn = int64(n.pgins) + m.PageOut = int64(n.pgouts) + m.PgSpIn = int64(n.pgspins) + m.PgSpOut = int64(n.pgspouts) + m.Scans = int64(n.scans) + m.Cycles = int64(n.cycles) + m.PgSteals = int64(n.pgsteals) + m.NumPerm = int64(n.numperm) + m.PgSpTotal = int64(n.pgsp_total) + m.PgSpFree = int64(n.pgsp_free) + m.PgSpRsvd = int64(n.pgsp_rsvd) + m.RealSystem = int64(n.real_system) + m.RealUser = int64(n.real_user) + m.RealProcess = int64(n.real_process) + m.VirtualActive = int64(n.virt_active) + m.IOME = int64(n.iome) + m.IOMU = int64(n.iomu) + m.IOHWM = int64(n.iohwm) + m.PMem = int64(n.pmem) + m.CompressedTotal = int64(n.comprsd_total) + m.CompressedWSegPg = int64(n.comprsd_wseg_pgs) + m.CPgIn = int64(n.cpgins) + m.CPgOut = int64(n.cpgouts) + m.TrueSize = int64(n.true_size) + m.ExpandedMemory = int64(n.expanded_memory) + m.CompressedWSegSize = int64(n.comprsd_wseg_size) + m.TargetCPoolSize = int64(n.target_cpool_size) + m.MaxCPoolSize = int64(n.max_cpool_size) + m.MinUCPoolSize = int64(n.min_ucpool_size) + m.CPoolSize = int64(n.cpool_size) + m.UCPoolSize = int64(n.ucpool_size) + m.CPoolInUse = int64(n.cpool_inuse) + m.UCPoolInUse = int64(n.ucpool_inuse) + m.Version = int64(n.version) + m.RealAvailable = int64(n.real_avail) + m.BytesCoalesced = int64(n.bytes_coalesced) + m.BytesCoalescedMemPool = int64(n.bytes_coalesced_mempool) + + return m +} + +func perfstatnetinterfacetotal2netifacetotal(n C.perfstat_netinterface_total_t) NetIfaceTotal { + var i NetIfaceTotal + + i.Number = int32(n.number) + i.IPackets = int64(n.ipackets) + i.IBytes = int64(n.ibytes) + i.IErrors = int64(n.ierrors) + i.OPackets = int64(n.opackets) + i.OBytes = int64(n.obytes) + i.OErrors = int64(n.oerrors) + i.Collisions = int64(n.collisions) + i.XmitDrops = int64(n.xmitdrops) + i.Version = int64(n.version) + + return i +} + +func perfstatdisk2disk(n *C.perfstat_disk_t) Disk { + var d Disk + + d.Name = C.GoString(&n.name[0]) + d.Description = C.GoString(&n.description[0]) + d.VGName = C.GoString(&n.vgname[0]) + d.Size = int64(n.size) + d.Free = int64(n.free) + d.BSize = int64(n.bsize) + d.XRate = int64(n.xrate) + d.Xfers = int64(n.xfers) + d.Wblks = int64(n.wblks) + d.Rblks = int64(n.rblks) + d.QDepth = int64(n.qdepth) + d.Time = int64(n.time) + d.Adapter = C.GoString(&n.adapter[0]) + d.PathsCount = int32(n.paths_count) + d.QFull = int64(n.q_full) + d.Rserv = int64(n.rserv) + d.RTimeOut = int64(n.rtimeout) + d.Rfailed = int64(n.rfailed) + d.MinRserv = int64(n.min_rserv) + d.MaxRserv = int64(n.max_rserv) + d.Wserv = int64(n.wserv) + d.WTimeOut = int64(n.wtimeout) + d.Wfailed = int64(n.wfailed) + d.MinWserv = int64(n.min_wserv) + d.MaxWserv = int64(n.max_wserv) + d.WqDepth = int64(n.wq_depth) + d.WqSampled = int64(n.wq_sampled) + d.WqTime = int64(n.wq_time) + d.WqMinTime = int64(n.wq_min_time) + d.WqMaxTime = int64(n.wq_max_time) + d.QSampled = int64(n.q_sampled) + d.Version = int64(n.version) + d.PseudoDisk = (n.dk_type[0] & (1 << 7)) > 0 + d.VTDisk = (n.dk_type[0] & (1 << 6)) > 0 + + return 
d +} + +func perfstatdiskpath2diskpath(n *C.perfstat_diskpath_t) DiskPath { + var d DiskPath + + d.Name = C.GoString(&n.name[0]) + d.XRate = int64(n.xrate) + d.Xfers = int64(n.xfers) + d.Rblks = int64(n.rblks) + d.Wblks = int64(n.wblks) + d.Time = int64(n.time) + d.Adapter = C.GoString(&n.adapter[0]) + d.QFull = int64(n.q_full) + d.Rserv = int64(n.rserv) + d.RTimeOut = int64(n.rtimeout) + d.Rfailed = int64(n.rfailed) + d.MinRserv = int64(n.min_rserv) + d.MaxRserv = int64(n.max_rserv) + d.Wserv = int64(n.wserv) + d.WTimeOut = int64(n.wtimeout) + d.Wfailed = int64(n.wfailed) + d.MinWserv = int64(n.min_wserv) + d.MaxWserv = int64(n.max_wserv) + d.WqDepth = int64(n.wq_depth) + d.WqSampled = int64(n.wq_sampled) + d.WqTime = int64(n.wq_time) + d.WqMinTime = int64(n.wq_min_time) + d.WqMaxTime = int64(n.wq_max_time) + d.QSampled = int64(n.q_sampled) + d.Version = int64(n.version) + + return d +} + +func perfstatfcstat2fcadapter(n *C.perfstat_fcstat_t) FCAdapter { + var f FCAdapter + + f.Version = int64(n.version) + f.Name = C.GoString(&n.name[0]) + f.State = int32(n.state) + f.InputRequests = int64(n.InputRequests) + f.OutputRequests = int64(n.OutputRequests) + f.InputBytes = int64(n.InputBytes) + f.OutputBytes = int64(n.OutputBytes) + f.EffMaxTransfer = int64(n.EffMaxTransfer) + f.NoDMAResourceCnt = int64(n.NoDMAResourceCnt) + f.NoCmdResourceCnt = int64(n.NoCmdResourceCnt) + f.AttentionType = int32(n.AttentionType) + f.SecondsSinceLastReset = int64(n.SecondsSinceLastReset) + f.TxFrames = int64(n.TxFrames) + f.TxWords = int64(n.TxWords) + f.RxFrames = int64(n.RxFrames) + f.RxWords = int64(n.RxWords) + f.LIPCount = int64(n.LIPCount) + f.NOSCount = int64(n.NOSCount) + f.ErrorFrames = int64(n.ErrorFrames) + f.DumpedFrames = int64(n.DumpedFrames) + f.LinkFailureCount = int64(n.LinkFailureCount) + f.LossofSyncCount = int64(n.LossofSyncCount) + f.LossofSignal = int64(n.LossofSignal) + f.PrimitiveSeqProtocolErrCount = int64(n.PrimitiveSeqProtocolErrCount) + f.InvalidTxWordCount = int64(n.InvalidTxWordCount) + f.InvalidCRCCount = int64(n.InvalidCRCCount) + f.PortFcId = int64(n.PortFcId) + f.PortSpeed = int64(n.PortSpeed) + f.PortType = C.GoString(&n.PortType[0]) + f.PortWWN = int64(n.PortWWN) + f.PortSupportedSpeed = int64(n.PortSupportedSpeed) + f.AdapterType = int(n.adapter_type) + f.VfcName = C.GoString(&n.vfc_name[0]) + f.ClientPartName = C.GoString(&n.client_part_name[0]) + + return f +} + +func perfstatlogicalvolume2logicalvolume(n *C.perfstat_logicalvolume_t) LogicalVolume { + var l LogicalVolume + + l.Name = C.GoString(&n.name[0]) + l.VGName = C.GoString(&n.vgname[0]) + l.OpenClose = int64(n.open_close) + l.State = int64(n.state) + l.MirrorPolicy = int64(n.mirror_policy) + l.MirrorWriteConsistency = int64(n.mirror_write_consistency) + l.WriteVerify = int64(n.write_verify) + l.PPsize = int64(n.ppsize) + l.LogicalPartitions = int64(n.logical_partitions) + l.Mirrors = int32(n.mirrors) + l.IOCnt = int64(n.iocnt) + l.KBReads = int64(n.kbreads) + l.KBWrites = int64(n.kbwrites) + l.Version = int64(n.version) + + return l +} + +func perfstatvolumegroup2volumegroup(n *C.perfstat_volumegroup_t) VolumeGroup { + var v VolumeGroup + + v.Name = C.GoString(&n.name[0]) + v.TotalDisks = int64(n.total_disks) + v.ActiveDisks = int64(n.active_disks) + v.TotalLogicalVolumes = int64(n.total_logical_volumes) + v.OpenedLogicalVolumes = int64(n.opened_logical_volumes) + v.IOCnt = int64(n.iocnt) + v.KBReads = int64(n.kbreads) + v.KBWrites = int64(n.kbwrites) + v.Version = int64(n.version) + v.VariedState = 
int(n.variedState) + + return v +} + +func perfstatmemorypage2memorypage(n *C.perfstat_memory_page_t) MemoryPage { + var m MemoryPage + + m.PSize = int64(n.psize) + m.RealTotal = int64(n.real_total) + m.RealFree = int64(n.real_free) + m.RealPinned = int64(n.real_pinned) + m.RealInUse = int64(n.real_inuse) + m.PgExct = int64(n.pgexct) + m.PgIns = int64(n.pgins) + m.PgOuts = int64(n.pgouts) + m.PgSpIns = int64(n.pgspins) + m.PgSpOuts = int64(n.pgspouts) + m.Scans = int64(n.scans) + m.Cycles = int64(n.cycles) + m.PgSteals = int64(n.pgsteals) + m.NumPerm = int64(n.numperm) + m.NumPgSp = int64(n.numpgsp) + m.RealSystem = int64(n.real_system) + m.RealUser = int64(n.real_user) + m.RealProcess = int64(n.real_process) + m.VirtActive = int64(n.virt_active) + m.ComprsdTotal = int64(n.comprsd_total) + m.ComprsdWsegPgs = int64(n.comprsd_wseg_pgs) + m.CPgIns = int64(n.cpgins) + m.CPgOuts = int64(n.cpgouts) + m.CPoolInUse = int64(n.cpool_inuse) + m.UCPoolSize = int64(n.ucpool_size) + m.ComprsdWsegSize = int64(n.comprsd_wseg_size) + m.Version = int64(n.version) + m.RealAvail = int64(n.real_avail) + + return m +} + +func perfstatnetbuffer2netbuffer(n *C.perfstat_netbuffer_t) NetBuffer { + var b NetBuffer + + b.Name = C.GoString(&n.name[0]) + b.InUse = int64(n.inuse) + b.Calls = int64(n.calls) + b.Delayed = int64(n.delayed) + b.Free = int64(n.free) + b.Failed = int64(n.failed) + b.HighWatermark = int64(n.highwatermark) + b.Freed = int64(n.freed) + b.Version = int64(n.version) + + return b +} + +func perfstatnetinterface2netiface(n *C.perfstat_netinterface_t) NetIface { + var i NetIface + + i.Name = C.GoString(&n.name[0]) + i.Description = C.GoString(&n.description[0]) + i.Type = uint8(n._type) + i.MTU = int64(n.mtu) + i.IPackets = int64(n.ipackets) + i.IBytes = int64(n.ibytes) + i.IErrors = int64(n.ierrors) + i.OPackets = int64(n.opackets) + i.OBytes = int64(n.obytes) + i.OErrors = int64(n.oerrors) + i.Collisions = int64(n.collisions) + i.Bitrate = int64(n.bitrate) + i.XmitDrops = int64(n.xmitdrops) + i.Version = int64(n.version) + i.IfIqDrops = int64(n.if_iqdrops) + i.IfArpDrops = int64(n.if_arpdrops) + + return i +} + +func perfstatnetadapter2netadapter(n *C.perfstat_netadapter_t) NetAdapter { + var i NetAdapter + + i.Version = int64(n.version) + i.Name = C.GoString(&n.name[0]) + i.TxPackets = int64(n.tx_packets) + i.TxBytes = int64(n.tx_bytes) + i.TxInterrupts = int64(n.tx_interrupts) + i.TxErrors = int64(n.tx_errors) + i.TxPacketsDropped = int64(n.tx_packets_dropped) + i.TxQueueSize = int64(n.tx_queue_size) + i.TxQueueLen = int64(n.tx_queue_len) + i.TxQueueOverflow = int64(n.tx_queue_overflow) + i.TxBroadcastPackets = int64(n.tx_broadcast_packets) + i.TxMulticastPackets = int64(n.tx_multicast_packets) + i.TxCarrierSense = int64(n.tx_carrier_sense) + i.TxDMAUnderrun = int64(n.tx_DMA_underrun) + i.TxLostCTSErrors = int64(n.tx_lost_CTS_errors) + i.TxMaxCollisionErrors = int64(n.tx_max_collision_errors) + i.TxLateCollisionErrors = int64(n.tx_late_collision_errors) + i.TxDeferred = int64(n.tx_deferred) + i.TxTimeoutErrors = int64(n.tx_timeout_errors) + i.TxSingleCollisionCount = int64(n.tx_single_collision_count) + i.TxMultipleCollisionCount = int64(n.tx_multiple_collision_count) + i.RxPackets = int64(n.rx_packets) + i.RxBytes = int64(n.rx_bytes) + i.RxInterrupts = int64(n.rx_interrupts) + i.RxErrors = int64(n.rx_errors) + i.RxPacketsDropped = int64(n.rx_packets_dropped) + i.RxBadPackets = int64(n.rx_bad_packets) + i.RxMulticastPackets = int64(n.rx_multicast_packets) + i.RxBroadcastPackets = 
int64(n.rx_broadcast_packets) + i.RxCRCErrors = int64(n.rx_CRC_errors) + i.RxDMAOverrun = int64(n.rx_DMA_overrun) + i.RxAlignmentErrors = int64(n.rx_alignment_errors) + i.RxNoResourceErrors = int64(n.rx_noresource_errors) + i.RxCollisionErrors = int64(n.rx_collision_errors) + i.RxPacketTooShortErrors = int64(n.rx_packet_tooshort_errors) + i.RxPacketTooLongErrors = int64(n.rx_packet_toolong_errors) + i.RxPacketDiscardedByAdapter = int64(n.rx_packets_discardedbyadapter) + i.AdapterType = int32(n.adapter_type) + + return i +} + +func perfstatpagingspace2pagingspace(n *C.perfstat_pagingspace_t) PagingSpace { + var i PagingSpace + + i.Name = C.GoString(&n.name[0]) + i.Type = uint8(n._type) + i.VGName = C.GoString(C.get_ps_vgname(n)) + i.Hostname = C.GoString(C.get_ps_hostname(n)) + i.Filename = C.GoString(C.get_ps_filename(n)) + i.LPSize = int64(n.lp_size) + i.MBSize = int64(n.mb_size) + i.MBUsed = int64(n.mb_used) + i.IOPending = int64(n.io_pending) + i.Active = uint8(n.active) + i.Automatic = uint8(n.automatic) + i.Version = int64(n.version) + + return i +} + +func perfstatprocess2process(n *C.perfstat_process_t) Process { + var i Process + + i.Version = int64(n.version) + i.PID = int64(n.pid) + i.ProcessName = C.GoString(&n.proc_name[0]) + i.Priority = int32(n.proc_priority) + i.NumThreads = int64(n.num_threads) + i.UID = int64(n.proc_uid) + i.ClassID = int64(n.proc_classid) + i.Size = int64(n.proc_size) + i.RealMemData = int64(n.proc_real_mem_data) + i.RealMemText = int64(n.proc_real_mem_text) + i.VirtMemData = int64(n.proc_virt_mem_data) + i.VirtMemText = int64(n.proc_virt_mem_text) + i.SharedLibDataSize = int64(n.shared_lib_data_size) + i.HeapSize = int64(n.heap_size) + i.RealInUse = int64(n.real_inuse) + i.VirtInUse = int64(n.virt_inuse) + i.Pinned = int64(n.pinned) + i.PgSpInUse = int64(n.pgsp_inuse) + i.FilePages = int64(n.filepages) + i.RealInUseMap = int64(n.real_inuse_map) + i.VirtInUseMap = int64(n.virt_inuse_map) + i.PinnedInUseMap = int64(n.pinned_inuse_map) + i.UCpuTime = float64(n.ucpu_time) + i.SCpuTime = float64(n.scpu_time) + i.LastTimeBase = int64(n.last_timebase) + i.InBytes = int64(n.inBytes) + i.OutBytes = int64(n.outBytes) + i.InOps = int64(n.inOps) + i.OutOps = int64(n.outOps) + + return i +} + +func perfstatthread2thread(n *C.perfstat_thread_t) Thread { + var i Thread + + i.TID = int64(n.tid) + i.PID = int64(n.pid) + i.CpuID = int64(n.cpuid) + i.UCpuTime = float64(n.ucpu_time) + i.SCpuTime = float64(n.scpu_time) + i.LastTimeBase = int64(n.last_timebase) + i.Version = int64(n.version) + + return i +} + +func fsinfo2filesystem(n *C.struct_fsinfo) FileSystem { + var i FileSystem + + i.Device = C.GoString(n.devname) + i.MountPoint = C.GoString(n.fsname) + i.FSType = int(n.fstype) + i.Flags = int(n.flags) + i.TotalBlocks = int64(n.totalblks) + i.FreeBlocks = int64(n.freeblks) + i.TotalInodes = int64(n.totalinodes) + i.FreeInodes = int64(n.freeinodes) + + return i +} diff --git a/vendor/github.com/power-devops/perfstat/lparstat.go b/vendor/github.com/power-devops/perfstat/lparstat.go new file mode 100644 index 0000000000000..0ce35e3c562d3 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/lparstat.go @@ -0,0 +1,26 @@ +// +build aix + +package perfstat + +/* +#cgo LDFLAGS: -lperfstat + +#include +*/ +import "C" + +import ( + "fmt" +) + +func PartitionStat() (*PartitionConfig, error) { + var part C.perfstat_partition_config_t + + rc := C.perfstat_partition_config(nil, &part, C.sizeof_perfstat_partition_config_t, 1) + if rc != 1 { + return nil, 
fmt.Errorf("perfstat_partition_config() error") + } + p := perfstatpartitionconfig2partitionconfig(part) + return &p, nil + +} diff --git a/vendor/github.com/power-devops/perfstat/lvmstat.go b/vendor/github.com/power-devops/perfstat/lvmstat.go new file mode 100644 index 0000000000000..eb2064c80467a --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/lvmstat.go @@ -0,0 +1,72 @@ +// +build aix + +package perfstat + +/* +#cgo LDFLAGS: -lperfstat + +#include +#include +#include +#include "c_helpers.h" +*/ +import "C" + +import ( + "fmt" + "unsafe" +) + +func LogicalVolumeStat() ([]LogicalVolume, error) { + var lv *C.perfstat_logicalvolume_t + var lvname C.perfstat_id_t + + numlvs := C.perfstat_logicalvolume(nil, nil, C.sizeof_perfstat_logicalvolume_t, 0) + if numlvs <= 0 { + return nil, fmt.Errorf("perfstat_logicalvolume() error") + } + + lv_len := C.sizeof_perfstat_logicalvolume_t * C.ulong(numlvs) + lv = (*C.perfstat_logicalvolume_t)(C.malloc(lv_len)) + defer C.free(unsafe.Pointer(lv)) + C.strcpy(&lvname.name[0], C.CString("")) + r := C.perfstat_logicalvolume(&lvname, lv, C.sizeof_perfstat_logicalvolume_t, numlvs) + if r < 0 { + return nil, fmt.Errorf("perfstat_logicalvolume() error") + } + lvs := make([]LogicalVolume, r) + for i := 0; i < int(r); i++ { + l := C.get_logicalvolume_stat(lv, C.int(i)) + if l != nil { + lvs[i] = perfstatlogicalvolume2logicalvolume(l) + } + } + return lvs, nil +} + +func VolumeGroupStat() ([]VolumeGroup, error) { + var vg *C.perfstat_volumegroup_t + var vgname C.perfstat_id_t + + numvgs := C.perfstat_volumegroup(nil, nil, C.sizeof_perfstat_volumegroup_t, 0) + if numvgs <= 0 { + return nil, fmt.Errorf("perfstat_volumegroup() error") + } + + vg_len := C.sizeof_perfstat_volumegroup_t * C.ulong(numvgs) + vg = (*C.perfstat_volumegroup_t)(C.malloc(vg_len)) + defer C.free(unsafe.Pointer(vg)) + C.strcpy(&vgname.name[0], C.CString("")) + r := C.perfstat_volumegroup(&vgname, vg, C.sizeof_perfstat_volumegroup_t, numvgs) + if r < 0 { + return nil, fmt.Errorf("perfstat_volumegroup() error") + } + vgs := make([]VolumeGroup, r) + for i := 0; i < int(r); i++ { + v := C.get_volumegroup_stat(vg, C.int(i)) + if v != nil { + vgs[i] = perfstatvolumegroup2volumegroup(v) + } + } + return vgs, nil +} diff --git a/vendor/github.com/power-devops/perfstat/memstat.go b/vendor/github.com/power-devops/perfstat/memstat.go new file mode 100644 index 0000000000000..d211a73aac8d1 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/memstat.go @@ -0,0 +1,84 @@ +// +build aix + +package perfstat + +/* +#cgo LDFLAGS: -lperfstat + +#include +#include +#include + +#include "c_helpers.h" +*/ +import "C" + +import ( + "fmt" + "unsafe" +) + +func MemoryTotalStat() (*MemoryTotal, error) { + var memory C.perfstat_memory_total_t + + rc := C.perfstat_memory_total(nil, &memory, C.sizeof_perfstat_memory_total_t, 1) + if rc != 1 { + return nil, fmt.Errorf("perfstat_memory_total() error") + } + m := perfstatmemorytotal2memorytotal(memory) + return &m, nil +} + +func MemoryPageStat() ([]MemoryPage, error) { + var mempage *C.perfstat_memory_page_t + var fps C.perfstat_psize_t + + numps := C.perfstat_memory_page(nil, nil, C.sizeof_perfstat_memory_page_t, 0) + if numps < 1 { + return nil, fmt.Errorf("perfstat_memory_page() error") + } + + mp_len := C.sizeof_perfstat_memory_page_t * C.ulong(numps) + mempage = (*C.perfstat_memory_page_t)(C.malloc(mp_len)) + defer C.free(unsafe.Pointer(mempage)) + fps.psize = C.FIRST_PSIZE + r := C.perfstat_memory_page(&fps, mempage, C.sizeof_perfstat_memory_page_t, 
numps) + if r < 1 { + return nil, fmt.Errorf("perfstat_memory_page() error") + } + ps := make([]MemoryPage, r) + for i := 0; i < int(r); i++ { + p := C.get_memory_page_stat(mempage, C.int(i)) + if p != nil { + ps[i] = perfstatmemorypage2memorypage(p) + } + } + return ps, nil +} + +func PagingSpaceStat() ([]PagingSpace, error) { + var pspace *C.perfstat_pagingspace_t + var fps C.perfstat_id_t + + numps := C.perfstat_pagingspace(nil, nil, C.sizeof_perfstat_pagingspace_t, 0) + if numps <= 0 { + return nil, fmt.Errorf("perfstat_pagingspace() error") + } + + ps_len := C.sizeof_perfstat_pagingspace_t * C.ulong(numps) + pspace = (*C.perfstat_pagingspace_t)(C.malloc(ps_len)) + defer C.free(unsafe.Pointer(pspace)) + C.strcpy(&fps.name[0], C.CString(C.FIRST_PAGINGSPACE)) + r := C.perfstat_pagingspace(&fps, pspace, C.sizeof_perfstat_pagingspace_t, numps) + if r < 1 { + return nil, fmt.Errorf("perfstat_pagingspace() error") + } + ps := make([]PagingSpace, r) + for i := 0; i < int(r); i++ { + p := C.get_pagingspace_stat(pspace, C.int(i)) + if p != nil { + ps[i] = perfstatpagingspace2pagingspace(p) + } + } + return ps, nil +} diff --git a/vendor/github.com/power-devops/perfstat/netstat.go b/vendor/github.com/power-devops/perfstat/netstat.go new file mode 100644 index 0000000000000..4070da211bc3d --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/netstat.go @@ -0,0 +1,117 @@ +// +build aix + +package perfstat + +/* +#cgo LDFLAGS: -lperfstat + +#include +#include +#include + +#include "c_helpers.h" +*/ +import "C" + +import ( + "fmt" + "unsafe" +) + +func NetIfaceTotalStat() (*NetIfaceTotal, error) { + var nif C.perfstat_netinterface_total_t + + rc := C.perfstat_netinterface_total(nil, &nif, C.sizeof_perfstat_netinterface_total_t, 1) + if rc != 1 { + return nil, fmt.Errorf("perfstat_netinterface_total() error") + } + n := perfstatnetinterfacetotal2netifacetotal(nif) + return &n, nil +} + +func NetBufferStat() ([]NetBuffer, error) { + var nbuf *C.perfstat_netbuffer_t + var first C.perfstat_id_t + + numbuf := C.perfstat_netbuffer(nil, nil, C.sizeof_perfstat_netbuffer_t, 0) + if numbuf < 1 { + return nil, fmt.Errorf("perfstat_netbuffer() error") + } + + nblen := C.sizeof_perfstat_netbuffer_t * C.ulong(numbuf) + nbuf = (*C.perfstat_netbuffer_t)(C.malloc(nblen)) + defer C.free(unsafe.Pointer(nbuf)) + C.strcpy(&first.name[0], C.CString(C.FIRST_NETBUFFER)) + r := C.perfstat_netbuffer(&first, nbuf, C.sizeof_perfstat_netbuffer_t, numbuf) + if r < 0 { + return nil, fmt.Errorf("perfstat_netbuffer() error") + } + nb := make([]NetBuffer, r) + for i := 0; i < int(r); i++ { + b := C.get_netbuffer_stat(nbuf, C.int(i)) + if b != nil { + nb[i] = perfstatnetbuffer2netbuffer(b) + } + } + return nb, nil +} + +func NetIfaceStat() ([]NetIface, error) { + var nif *C.perfstat_netinterface_t + var first C.perfstat_id_t + + numif := C.perfstat_netinterface(nil, nil, C.sizeof_perfstat_netinterface_t, 0) + if numif < 0 { + return nil, fmt.Errorf("perfstat_netinterface() error") + } + if numif == 0 { + return []NetIface{}, fmt.Errorf("no network interfaces found") + } + + iflen := C.sizeof_perfstat_netinterface_t * C.ulong(numif) + nif = (*C.perfstat_netinterface_t)(C.malloc(iflen)) + defer C.free(unsafe.Pointer(nif)) + C.strcpy(&first.name[0], C.CString(C.FIRST_NETINTERFACE)) + r := C.perfstat_netinterface(&first, nif, C.sizeof_perfstat_netinterface_t, numif) + if r < 0 { + return nil, fmt.Errorf("perfstat_netinterface() error") + } + ifs := make([]NetIface, r) + for i := 0; i < int(r); i++ { + b := 
C.get_netinterface_stat(nif, C.int(i)) + if b != nil { + ifs[i] = perfstatnetinterface2netiface(b) + } + } + return ifs, nil +} + +func NetAdapterStat() ([]NetAdapter, error) { + var adapters *C.perfstat_netadapter_t + var first C.perfstat_id_t + + numad := C.perfstat_netadapter(nil, nil, C.sizeof_perfstat_netadapter_t, 0) + if numad < 0 { + return nil, fmt.Errorf("perfstat_netadater() error") + } + if numad == 0 { + return []NetAdapter{}, fmt.Errorf("no network adapters found") + } + + adplen := C.sizeof_perfstat_netadapter_t * C.ulong(numad) + adapters = (*C.perfstat_netadapter_t)(C.malloc(adplen)) + defer C.free(unsafe.Pointer(adapters)) + C.strcpy(&first.name[0], C.CString(C.FIRST_NETINTERFACE)) + r := C.perfstat_netadapter(&first, adapters, C.sizeof_perfstat_netadapter_t, numad) + if r < 0 { + return nil, fmt.Errorf("perfstat_netadapter() error") + } + ads := make([]NetAdapter, r) + for i := 0; i < int(r); i++ { + b := C.get_netadapter_stat(adapters, C.int(i)) + if b != nil { + ads[i] = perfstatnetadapter2netadapter(b) + } + } + return ads, nil +} diff --git a/vendor/github.com/power-devops/perfstat/procstat.go b/vendor/github.com/power-devops/perfstat/procstat.go new file mode 100644 index 0000000000000..ecafebd8db25f --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/procstat.go @@ -0,0 +1,75 @@ +// +build aix + +package perfstat + +/* +#cgo LDFLAGS: -lperfstat + +#include +#include +#include + +#include "c_helpers.h" +*/ +import "C" + +import ( + "fmt" + "unsafe" +) + +func ProcessStat() ([]Process, error) { + var proc *C.perfstat_process_t + var first C.perfstat_id_t + + numproc := C.perfstat_process(nil, nil, C.sizeof_perfstat_process_t, 0) + if numproc < 1 { + return nil, fmt.Errorf("perfstat_process() error") + } + + plen := C.sizeof_perfstat_process_t * C.ulong(numproc) + proc = (*C.perfstat_process_t)(C.malloc(plen)) + defer C.free(unsafe.Pointer(proc)) + C.strcpy(&first.name[0], C.CString("")) + r := C.perfstat_process(&first, proc, C.sizeof_perfstat_process_t, numproc) + if r < 0 { + return nil, fmt.Errorf("perfstat_process() error") + } + + ps := make([]Process, r) + for i := 0; i < int(r); i++ { + p := C.get_process_stat(proc, C.int(i)) + if p != nil { + ps[i] = perfstatprocess2process(p) + } + } + return ps, nil +} + +func ThreadStat() ([]Thread, error) { + var thread *C.perfstat_thread_t + var first C.perfstat_id_t + + numthr := C.perfstat_thread(nil, nil, C.sizeof_perfstat_thread_t, 0) + if numthr < 1 { + return nil, fmt.Errorf("perfstat_thread() error") + } + + thlen := C.sizeof_perfstat_thread_t * C.ulong(numthr) + thread = (*C.perfstat_thread_t)(C.malloc(thlen)) + defer C.free(unsafe.Pointer(thread)) + C.strcpy(&first.name[0], C.CString("")) + r := C.perfstat_thread(&first, thread, C.sizeof_perfstat_thread_t, numthr) + if r < 0 { + return nil, fmt.Errorf("perfstat_thread() error") + } + + th := make([]Thread, r) + for i := 0; i < int(r); i++ { + t := C.get_thread_stat(thread, C.int(i)) + if t != nil { + th[i] = perfstatthread2thread(t) + } + } + return th, nil +} diff --git a/vendor/github.com/power-devops/perfstat/sysconf.go b/vendor/github.com/power-devops/perfstat/sysconf.go new file mode 100644 index 0000000000000..c7454d03d49cc --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/sysconf.go @@ -0,0 +1,195 @@ +// +build aix + +package perfstat + +/* +#include +*/ +import "C" + +import "fmt" + +const ( + SC_ARG_MAX = 0 + SC_CHILD_MAX = 1 + SC_CLK_TCK = 2 + SC_NGROUPS_MAX = 3 + SC_OPEN_MAX = 4 + SC_STREAM_MAX = 5 + SC_TZNAME_MAX = 6 + 
SC_JOB_CONTROL = 7 + SC_SAVED_IDS = 8 + SC_VERSION = 9 + SC_POSIX_ARG_MAX = 10 + SC_POSIX_CHILD_MAX = 11 + SC_POSIX_LINK_MAX = 12 + SC_POSIX_MAX_CANON = 13 + SC_POSIX_MAX_INPUT = 14 + SC_POSIX_NAME_MAX = 15 + SC_POSIX_NGROUPS_MAX = 16 + SC_POSIX_OPEN_MAX = 17 + SC_POSIX_PATH_MAX = 18 + SC_POSIX_PIPE_BUF = 19 + SC_POSIX_SSIZE_MAX = 20 + SC_POSIX_STREAM_MAX = 21 + SC_POSIX_TZNAME_MAX = 22 + SC_BC_BASE_MAX = 23 + SC_BC_DIM_MAX = 24 + SC_BC_SCALE_MAX = 25 + SC_BC_STRING_MAX = 26 + SC_EQUIV_CLASS_MAX = 27 + SC_EXPR_NEST_MAX = 28 + SC_LINE_MAX = 29 + SC_RE_DUP_MAX = 30 + SC_2_VERSION = 31 + SC_2_C_DEV = 32 + SC_2_FORT_DEV = 33 + SC_2_FORT_RUN = 34 + SC_2_LOCALEDEF = 35 + SC_2_SW_DEV = 36 + SC_POSIX2_BC_BASE_MAX = 37 + SC_POSIX2_BC_DIM_MAX = 38 + SC_POSIX2_BC_SCALE_MAX = 39 + SC_POSIX2_BC_STRING_MAX = 40 + SC_POSIX2_BC_EQUIV_CLASS_MAX = 41 + SC_POSIX2_BC_EXPR_NEST_MAX = 42 + SC_POSIX2_BC_LINE_MAX = 43 + SC_POSIX2_BC_RE_DUP_MAX = 44 + SC_PASS_MAX = 45 + SC_XOPEN_VERSION = 46 + SC_ATEXIT_MAX = 47 + SC_PAGE_SIZE = 48 + SC_PAGESIZE = SC_PAGE_SIZE + SC_AES_OS_VERSION = 49 + SC_COLL_WEIGHTS_MAX = 50 + SC_2_C_WIND = 51 + SC_2_C_VERSION = 52 + SC_2_UPE = 53 + SC_2_CHAR_TERM = 54 + SC_XOPEN_SHM = 55 + SC_XOPEN_CRYPT = 56 + SC_XOPEN_ENH_I18N = 57 + SC_IOV_MAX = 58 + SC_THREAD_SAFE_FUNCTIONS = 59 + SC_THREADS = 60 + SC_THREAD_ATTR_STACKADDR = 61 + SC_THREAD_ATTR_STACKSIZE = 62 + SC_THREAD_FORKALL = 63 + SC_THREAD_PRIORITY_SCHEDULING = 64 + SC_THREAD_PRIO_INHERIT = 65 + SC_THREAD_PRIO_PROTECT = 66 + SC_THREAD_PROCESS_SHARED = 67 + SC_THREAD_KEYS_MAX = 68 + SC_THREAD_DATAKEYS_MAX = SC_THREAD_KEYS_MAX + SC_THREAD_STACK_MIN = 69 + SC_THREAD_THREADS_MAX = 70 + SC_NPROCESSORS_CONF = 71 + SC_NPROCESSORS_ONLN = 72 + SC_XOPEN_UNIX = 73 + SC_AIO_LISTIO_MAX = 75 + SC_AIO_MAX = 76 + SC_AIO_PRIO_DELTA_MAX = 77 + SC_ASYNCHRONOUS_IO = 78 + SC_DELAYTIMER_MAX = 79 + SC_FSYNC = 80 + SC_GETGR_R_SIZE_MAX = 81 + SC_GETPW_R_SIZE_MAX = 82 + SC_LOGIN_NAME_MAX = 83 + SC_MAPPED_FILES = 84 + SC_MEMLOCK = 85 + SC_MEMLOCK_RANGE = 86 + SC_MEMORY_PROTECTION = 87 + SC_MESSAGE_PASSING = 88 + SC_MQ_OPEN_MAX = 89 + SC_MQ_PRIO_MAX = 90 + SC_PRIORITIZED_IO = 91 + SC_PRIORITY_SCHEDULING = 92 + SC_REALTIME_SIGNALS = 93 + SC_RTSIG_MAX = 94 + SC_SEMAPHORES = 95 + SC_SEM_NSEMS_MAX = 96 + SC_SEM_VALUE_MAX = 97 + SC_SHARED_MEMORY_OBJECTS = 98 + SC_SIGQUEUE_MAX = 99 + SC_SYNCHRONIZED_IO = 100 + SC_THREAD_DESTRUCTOR_ITERATIONS = 101 + SC_TIMERS = 102 + SC_TIMER_MAX = 103 + SC_TTY_NAME_MAX = 104 + SC_XBS5_ILP32_OFF32 = 105 + SC_XBS5_ILP32_OFFBIG = 106 + SC_XBS5_LP64_OFF64 = 107 + SC_XBS5_LPBIG_OFFBIG = 108 + SC_XOPEN_XCU_VERSION = 109 + SC_XOPEN_REALTIME = 110 + SC_XOPEN_REALTIME_THREADS = 111 + SC_XOPEN_LEGACY = 112 + SC_REENTRANT_FUNCTIONS = SC_THREAD_SAFE_FUNCTIONS + SC_PHYS_PAGES = 113 + SC_AVPHYS_PAGES = 114 + SC_LPAR_ENABLED = 115 + SC_LARGE_PAGESIZE = 116 + SC_AIX_KERNEL_BITMODE = 117 + SC_AIX_REALMEM = 118 + SC_AIX_HARDWARE_BITMODE = 119 + SC_AIX_MP_CAPABLE = 120 + SC_V6_ILP32_OFF32 = 121 + SC_V6_ILP32_OFFBIG = 122 + SC_V6_LP64_OFF64 = 123 + SC_V6_LPBIG_OFFBIG = 124 + SC_XOPEN_STREAMS = 125 + SC_HOST_NAME_MAX = 126 + SC_REGEXP = 127 + SC_SHELL = 128 + SC_SYMLOOP_MAX = 129 + SC_ADVISORY_INFO = 130 + SC_FILE_LOCKING = 131 + SC_2_PBS = 132 + SC_2_PBS_ACCOUNTING = 133 + SC_2_PBS_CHECKPOINT = 134 + SC_2_PBS_LOCATE = 135 + SC_2_PBS_MESSAGE = 136 + SC_2_PBS_TRACK = 137 + SC_BARRIERS = 138 + SC_CLOCK_SELECTION = 139 + SC_CPUTIME = 140 + SC_MONOTONIC_CLOCK = 141 + SC_READER_WRITER_LOCKS = 142 + SC_SPAWN = 143 + SC_SPIN_LOCKS = 144 + SC_SPORADIC_SERVER = 
145 + SC_THREAD_CPUTIME = 146 + SC_THREAD_SPORADIC_SERVER = 147 + SC_TIMEOUTS = 148 + SC_TRACE = 149 + SC_TRACE_EVENT_FILTER = 150 + SC_TRACE_INHERIT = 151 + SC_TRACE_LOG = 152 + SC_TYPED_MEMORY_OBJECTS = 153 + SC_IPV6 = 154 + SC_RAW_SOCKETS = 155 + SC_SS_REPL_MAX = 156 + SC_TRACE_EVENT_NAME_MAX = 157 + SC_TRACE_NAME_MAX = 158 + SC_TRACE_SYS_MAX = 159 + SC_TRACE_USER_EVENT_MAX = 160 + SC_AIX_UKEYS = 161 + SC_AIX_ENHANCED_AFFINITY = 162 + SC_V7_ILP32_OFF32 = 163 + SC_V7_ILP32_OFFBIG = 164 + SC_V7_LP64_OFF64 = 165 + SC_V7_LPBIG_OFFBIG = 166 + SC_THREAD_ROBUST_PRIO_INHERIT = 167 + SC_THREAD_ROBUST_PRIO_PROTECT = 168 + SC_XOPEN_UUCP = 169 + SC_XOPEN_ARMOR = 170 +) + +func Sysconf(name int32) (int64, error) { + r := C.sysconf(C.int(name)) + if r == -1 { + return 0, fmt.Errorf("sysconf error") + } else { + return int64(r), nil + } +} diff --git a/vendor/github.com/power-devops/perfstat/systemcfg.go b/vendor/github.com/power-devops/perfstat/systemcfg.go new file mode 100644 index 0000000000000..6287eb46ab8ba --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/systemcfg.go @@ -0,0 +1,635 @@ +// +build aix + +package perfstat + +import "golang.org/x/sys/unix" + +// function Getsystemcfg() is defined in golang.org/x/sys/unix +// we define here just missing constants for the function and some helpers + +// Calls to getsystemcfg() +const ( + SC_ARCH = 1 /* processor architecture */ + SC_IMPL = 2 /* processor implementation */ + SC_VERS = 3 /* processor version */ + SC_WIDTH = 4 /* width (32 || 64) */ + SC_NCPUS = 5 /* 1 = UP, n = n-way MP */ + SC_L1C_ATTR = 6 /* L1 cache attributes (bit flags) */ + SC_L1C_ISZ = 7 /* size of L1 instruction cache */ + SC_L1C_DSZ = 8 /* size of L1 data cache */ + SC_L1C_ICA = 9 /* L1 instruction cache associativity */ + SC_L1C_DCA = 10 /* L1 data cache associativity */ + SC_L1C_IBS = 11 /* L1 instruction cache block size */ + SC_L1C_DBS = 12 /* L1 data cache block size */ + SC_L1C_ILS = 13 /* L1 instruction cache line size */ + SC_L1C_DLS = 14 /* L1 data cache line size */ + SC_L2C_SZ = 15 /* size of L2 cache, 0 = No L2 cache */ + SC_L2C_AS = 16 /* L2 cache associativity */ + SC_TLB_ATTR = 17 /* TLB attributes (bit flags) */ + SC_ITLB_SZ = 18 /* entries in instruction TLB */ + SC_DTLB_SZ = 19 /* entries in data TLB */ + SC_ITLB_ATT = 20 /* instruction tlb associativity */ + SC_DTLB_ATT = 21 /* data tlb associativity */ + SC_RESRV_SZ = 22 /* size of reservation */ + SC_PRI_LC = 23 /* spin lock count in supevisor mode */ + SC_PRO_LC = 24 /* spin lock count in problem state */ + SC_RTC_TYPE = 25 /* RTC type */ + SC_VIRT_AL = 26 /* 1 if hardware aliasing is supported */ + SC_CAC_CONG = 27 /* number of page bits for cache synonym */ + SC_MOD_ARCH = 28 /* used by system for model determination */ + SC_MOD_IMPL = 29 /* used by system for model determination */ + SC_XINT = 30 /* used by system for time base conversion */ + SC_XFRAC = 31 /* used by system for time base conversion */ + SC_KRN_ATTR = 32 /* kernel attributes, see below */ + SC_PHYSMEM = 33 /* bytes of OS available memory */ + SC_SLB_ATTR = 34 /* SLB attributes */ + SC_SLB_SZ = 35 /* size of slb (0 = no slb) */ + SC_ORIG_NCPUS = 36 /* original number of CPUs */ + SC_MAX_NCPUS = 37 /* max cpus supported by this AIX image */ + SC_MAX_REALADDR = 38 /* max supported real memory address +1 */ + SC_ORIG_ENT_CAP = 39 /* configured entitled processor capacity at boot required by cross-partition LPAR tools. 
*/ + SC_ENT_CAP = 40 /* entitled processor capacity */ + SC_DISP_WHE = 41 /* Dispatch wheel time period (TB units) */ + SC_CAPINC = 42 /* delta by which capacity can change */ + SC_VCAPW = 43 /* priority weight for idle capacity distribution */ + SC_SPLP_STAT = 44 /* State of SPLPAR enablement: 0x1 => 1=SPLPAR capable; 0=not, 0x2 => SPLPAR enabled 0=dedicated, 1=shared */ + SC_SMT_STAT = 45 /* State of SMT enablement: 0x1 = SMT Capable 0=no/1=yes, 0x2 = SMT Enabled 0=no/1=yes, 0x4 = SMT threads bound true 0=no/1=yes */ + SC_SMT_TC = 46 /* Number of SMT Threads per Physical CPU */ + SC_VMX_VER = 47 /* RPA defined VMX version: 0 = VMX not available or disabled, 1 = VMX capable, 2 = VMX and VSX capable */ + SC_LMB_SZ = 48 /* Size of an LMB on this system. */ + SC_MAX_XCPU = 49 /* Number of exclusive cpus on line */ + SC_EC_LVL = 50 /* Kernel error checking level */ + SC_AME_STAT = 51 /* AME status */ + SC_ECO_STAT = 52 /* extended cache options */ + SC_DFP_STAT = 53 /* RPA defined DFP version, 0=none/disabled */ + SC_VRM_STAT = 54 /* VRM Capable/enabled */ + SC_PHYS_IMP = 55 /* physical processor implementation */ + SC_PHYS_VER = 56 /* physical processor version */ + SC_SPCM_STATUS = 57 + SC_SPCM_MAX = 58 + SC_TM_VER = 59 /* Transaction Memory version, 0 - not capable */ + SC_NX_CAP = 60 /* NX GZIP capable */ + SC_PKS_STATE = 61 /* Platform KeyStore */ +) + +/* kernel attributes */ +/* bit 0/1 meaning */ +/* -----------------------------------------*/ +/* 31 32-bit kernel / 64-bit kernel */ +/* 30 non-LPAR / LPAR */ +/* 29 old 64bit ABI / 64bit Large ABI */ +/* 28 non-NUMA / NUMA */ +/* 27 UP / MP */ +/* 26 no DR CPU add / DR CPU add support */ +/* 25 no DR CPU rm / DR CPU rm support */ +/* 24 no DR MEM add / DR MEM add support */ +/* 23 no DR MEM rm / DR MEM rm support */ +/* 22 kernel keys disabled / enabled */ +/* 21 no recovery / recovery enabled */ +/* 20 non-MLS / MLS enabled */ +/* 19 enhanced affinity indicator */ +/* 18 non-vTPM / vTPM enabled */ +/* 17 non-VIOS / VIOS */ + +// Values for architecture field +const ( + ARCH_POWER_RS = 0x0001 /* Power Classic architecture */ + ARCH_POWER_PC = 0x0002 /* Power PC architecture */ + ARCH_IA64 = 0x0003 /* Intel IA64 architecture */ +) + +// Values for implementation field for POWER_PC Architectures +const ( + IMPL_POWER_RS1 = 0x00001 /* RS1 class CPU */ + IMPL_POWER_RSC = 0x00002 /* RSC class CPU */ + IMPL_POWER_RS2 = 0x00004 /* RS2 class CPU */ + IMPL_POWER_601 = 0x00008 /* 601 class CPU */ + IMPL_POWER_603 = 0x00020 /* 603 class CPU */ + IMPL_POWER_604 = 0x00010 /* 604 class CPU */ + IMPL_POWER_620 = 0x00040 /* 620 class CPU */ + IMPL_POWER_630 = 0x00080 /* 630 class CPU */ + IMPL_POWER_A35 = 0x00100 /* A35 class CPU */ + IMPL_POWER_RS64II = 0x0200 /* RS64-II class CPU */ + IMPL_POWER_RS64III = 0x0400 /* RS64-III class CPU */ + IMPL_POWER4 = 0x0800 /* 4 class CPU */ + IMPL_POWER_RS64IV = IMPL_POWER4 /* 4 class CPU */ + IMPL_POWER_MPC7450 = 0x1000 /* MPC7450 class CPU */ + IMPL_POWER5 = 0x2000 /* 5 class CPU */ + IMPL_POWER6 = 0x4000 /* 6 class CPU */ + IMPL_POWER7 = 0x8000 /* 7 class CPU */ + IMPL_POWER8 = 0x10000 /* 8 class CPU */ + IMPL_POWER9 = 0x20000 /* 9 class CPU */ +) + +// Values for implementation field for IA64 Architectures +const ( + IMPL_IA64_M1 = 0x0001 /* IA64 M1 class CPU (Itanium) */ + IMPL_IA64_M2 = 0x0002 /* IA64 M2 class CPU */ +) + +// Values for the version field +const ( + PV_601 = 0x010001 /* Power PC 601 */ + PV_601A = 0x010002 /* Power PC 601 */ + PV_603 = 0x060000 /* Power PC 603 */ + PV_604 = 0x050000 /* Power 
PC 604 */ + PV_620 = 0x070000 /* Power PC 620 */ + PV_630 = 0x080000 /* Power PC 630 */ + PV_A35 = 0x090000 /* Power PC A35 */ + PV_RS64II = 0x0A0000 /* Power PC RS64II */ + PV_RS64III = 0x0B0000 /* Power PC RS64III */ + PV_4 = 0x0C0000 /* Power PC 4 */ + PV_RS64IV = PV_4 /* Power PC 4 */ + PV_MPC7450 = 0x0D0000 /* Power PC MPC7450 */ + PV_4_2 = 0x0E0000 /* Power PC 4 */ + PV_4_3 = 0x0E0001 /* Power PC 4 */ + PV_5 = 0x0F0000 /* Power PC 5 */ + PV_5_2 = 0x0F0001 /* Power PC 5 */ + PV_5_3 = 0x0F0002 /* Power PC 5 */ + PV_6 = 0x100000 /* Power PC 6 */ + PV_6_1 = 0x100001 /* Power PC 6 DD1.x */ + PV_7 = 0x200000 /* Power PC 7 */ + PV_8 = 0x300000 /* Power PC 8 */ + PV_9 = 0x400000 /* Power PC 9 */ + PV_5_Compat = 0x0F8000 /* Power PC 5 */ + PV_6_Compat = 0x108000 /* Power PC 6 */ + PV_7_Compat = 0x208000 /* Power PC 7 */ + PV_8_Compat = 0x308000 /* Power PC 8 */ + PV_9_Compat = 0x408000 /* Power PC 9 */ + PV_RESERVED_2 = 0x0A0000 /* source compatability */ + PV_RESERVED_3 = 0x0B0000 /* source compatability */ + PV_RS2 = 0x040000 /* Power RS2 */ + PV_RS1 = 0x020000 /* Power RS1 */ + PV_RSC = 0x030000 /* Power RSC */ + PV_M1 = 0x008000 /* Intel IA64 M1 */ + PV_M2 = 0x008001 /* Intel IA64 M2 */ +) + +// Values for rtc_type +const ( + RTC_POWER = 1 /* rtc as defined by Power Arch. */ + RTC_POWER_PC = 2 /* rtc as defined by Power PC Arch. */ + RTC_IA64 = 3 /* rtc as defined by IA64 Arch. */ +) + +const NX_GZIP_PRESENT = 0x00000001 + +const ( + PKS_STATE_CAPABLE = 1 + PKS_STATE_ENABLED = 2 +) + +// Macros for identifying physical processor +const ( + PPI4_1 = 0x35 + PPI4_2 = 0x38 + PPI4_3 = 0x39 + PPI4_4 = 0x3C + PPI4_5 = 0x44 + PPI5_1 = 0x3A + PPI5_2 = 0x3B + PPI6_1 = 0x3E + PPI7_1 = 0x3F + PPI7_2 = 0x4A + PPI8_1 = 0x4B + PPI8_2 = 0x4D + PPI9 = 0x4E +) + +// Macros for kernel attributes +const ( + KERN_TYPE = 0x1 + KERN_LPAR = 0x2 + KERN_64BIT_LARGE_ABI = 0x4 + KERN_NUMA = 0x8 + KERN_UPMP = 0x10 + KERN_DR_CPU_ADD = 0x20 + KERN_DR_CPU_RM = 0x40 + KERN_DR_MEM_ADD = 0x80 + KERN_DR_MEM_RM = 0x100 + KERN_KKEY_ENABLED = 0x200 + KERN_RECOVERY = 0x400 + KERN_MLS = 0x800 + KERN_ENH_AFFINITY = 0x1000 + KERN_VTPM = 0x2000 + KERN_VIOS = 0x4000 +) + +// macros for SPLPAR environment. +const ( + SPLPAR_CAPABLE = 0x1 + SPLPAR_ENABLED = 0x2 + SPLPAR_DONATE_CAPABLE = 0x4 +) + +// Macros for SMT status determination +const ( + SMT_CAPABLE = 0x1 + SMT_ENABLE = 0x2 + SMT_BOUND = 0x4 + SMT_ORDER = 0x8 +) + +// Macros for VRM status determination +const ( + VRM_CAPABLE = 0x1 + VRM_ENABLE = 0x2 + CMOX_CAPABLE = 0x4 +) + +// Macros for AME status determination +const AME_ENABLE = 0x1 + +// Macros for extended cache options +const ( + ECO_CAPABLE = 0x1 + ECO_ENABLE = 0x2 +) + +// These define blocks of values for model_arch and model_impl that are reserved for OEM use. 
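// A minimal caller-side sketch (hypothetical helper, not part of the vendored perfstat
// sources) of how the selectors and bit masks above are meant to be combined: pass an
// SC_* selector to unix.Getsystemcfg and test the returned value against the matching
// macros, in the same style as the SMT/SPLPAR helpers defined later in this file. It
// assumes it sits in this file, which already imports golang.org/x/sys/unix and
// declares the constants it references.
func partitionModeSummary() (smtEnabled, sharedLPAR bool, smtThreads int64) {
	smt := unix.Getsystemcfg(SC_SMT_STAT)   // SMT capability/enablement bit mask
	splp := unix.Getsystemcfg(SC_SPLP_STAT) // shared-processor LPAR state bit mask
	smtEnabled = smt&SMT_ENABLE != 0        // 0x2 => SMT is enabled
	sharedLPAR = splp&SPLPAR_ENABLED != 0   // 0x2 => partition runs in shared mode
	smtThreads = int64(unix.Getsystemcfg(SC_SMT_TC)) // SMT threads per physical CPU
	return smtEnabled, sharedLPAR, smtThreads
}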
+const ( + MODEL_ARCH_RSPC = 2 + MODEL_ARCH_CHRP = 3 + MODEL_ARCH_IA64 = 4 + MODEL_ARCH_OEM_START = 1024 + MODEL_ARCH_OEM_END = 2047 + MODEL_IMPL_RS6K_UP_MCA = 1 + MODEL_IMPL_RS6K_SMP_MCA = 2 + MODEL_IMPL_RSPC_UP_PCI = 3 + MODEL_IMPL_RSPC_SMP_PCI = 4 + MODEL_IMPL_CHRP_UP_PCI = 5 + MODEL_IMPL_CHRP_SMP_PCI = 6 + MODEL_IMPL_IA64_COM = 7 + MODEL_IMPL_IA64_SOFTSDV = 8 + MODEL_IMPL_MAMBO_SIM = 9 + MODEL_IMPL_POWER_KVM = 10 + MODEL_IMPL_OEM_START = 1024 + MODEL_IMPL_OEM_END = 2047 +) + +// example determining processor compatibilty mode on AIX: +// impl := unix.Getsystemcfg(SC_IMPL) +// if impl&IMPL_POWER8 != 0 { +// // we are running on POWER8 +// } +// if impl&IMPL_POWER9 != 0 { +// // we are running on POWER9 +// } + +func GetCPUImplementation() string { + impl := unix.Getsystemcfg(SC_IMPL) + switch { + case impl&IMPL_POWER4 != 0: + return "POWER4" + case impl&IMPL_POWER5 != 0: + return "POWER5" + case impl&IMPL_POWER6 != 0: + return "POWER6" + case impl&IMPL_POWER7 != 0: + return "POWER7" + case impl&IMPL_POWER8 != 0: + return "POWER8" + case impl&IMPL_POWER9 != 0: + return "POWER9" + default: + return "Unknown" + } +} + +func POWER9OrNewer() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER9 != 0 { + return true + } + return false +} + +func POWER9() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER9 != 0 { + return true + } + return false +} + +func POWER8OrNewer() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 { + return true + } + return false +} + +func POWER8() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER8 != 0 { + return true + } + return false +} + +func POWER7OrNewer() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER9 != 0 || impl&IMPL_POWER8 != 0 || impl&IMPL_POWER7 != 0 { + return true + } + return false +} + +func POWER7() bool { + impl := unix.Getsystemcfg(SC_IMPL) + if impl&IMPL_POWER7 != 0 { + return true + } + return false +} + +func HasTransactionalMemory() bool { + impl := unix.Getsystemcfg(SC_TM_VER) + if impl > 0 { + return true + } + return false +} + +func Is64Bit() bool { + impl := unix.Getsystemcfg(SC_WIDTH) + if impl == 64 { + return true + } + return false +} + +func IsSMP() bool { + impl := unix.Getsystemcfg(SC_NCPUS) + if impl > 1 { + return true + } + return false +} + +func HasVMX() bool { + impl := unix.Getsystemcfg(SC_VMX_VER) + if impl > 0 { + return true + } + return false +} + +func HasVSX() bool { + impl := unix.Getsystemcfg(SC_VMX_VER) + if impl > 1 { + return true + } + return false +} + +func HasDFP() bool { + impl := unix.Getsystemcfg(SC_DFP_STAT) + if impl > 1 { + return true + } + return false +} + +func HasNxGzip() bool { + impl := unix.Getsystemcfg(SC_NX_CAP) + if impl&NX_GZIP_PRESENT > 0 { + return true + } + return false +} + +func PksCapable() bool { + impl := unix.Getsystemcfg(SC_PKS_STATE) + if impl&PKS_STATE_CAPABLE > 0 { + return true + } + return false +} + +func PksEnabled() bool { + impl := unix.Getsystemcfg(SC_PKS_STATE) + if impl&PKS_STATE_ENABLED > 0 { + return true + } + return false +} + +func CPUMode() string { + impl := unix.Getsystemcfg(SC_VERS) + switch impl { + case PV_9, PV_9_Compat: + return "POWER9" + case PV_8, PV_8_Compat: + return "POWER8" + case PV_7, PV_7_Compat: + return "POWER7" + default: + return "Unknown" + } +} + +func KernelBits() int { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_TYPE == KERN_TYPE { + return 64 + } + return 32 +} + +func IsLPAR() bool { + impl := 
unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_LPAR == KERN_LPAR { + return true + } + return false +} + +func CpuAddCapable() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_DR_CPU_ADD == KERN_DR_CPU_ADD { + return true + } + return false +} + +func CpuRemoveCapable() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_DR_CPU_RM == KERN_DR_CPU_RM { + return true + } + return false +} + +func MemoryAddCapable() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_DR_MEM_ADD == KERN_DR_MEM_ADD { + return true + } + return false +} + +func MemoryRemoveCapable() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_DR_MEM_RM == KERN_DR_MEM_RM { + return true + } + return false +} + +func DLparCapable() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&(KERN_DR_CPU_ADD|KERN_DR_CPU_RM|KERN_DR_MEM_ADD|KERN_DR_MEM_RM) > 0 { + return true + } + return false +} + +func IsNUMA() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_NUMA > 0 { + return true + } + return false +} + +func KernelKeys() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_KKEY_ENABLED > 0 { + return true + } + return false +} + +func RecoveryMode() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_RECOVERY > 0 { + return true + } + return false +} + +func EnhancedAffinity() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_ENH_AFFINITY > 0 { + return true + } + return false +} + +func VTpmEnabled() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_VTPM > 0 { + return true + } + return false +} + +func IsVIOS() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_VIOS > 0 { + return true + } + return false +} + +func MLSEnabled() bool { + impl := unix.Getsystemcfg(SC_KRN_ATTR) + if impl&KERN_MLS > 0 { + return true + } + return false +} + +func SPLparCapable() bool { + impl := unix.Getsystemcfg(SC_SPLP_STAT) + if impl&SPLPAR_CAPABLE > 0 { + return true + } + return false +} + +func SPLparEnabled() bool { + impl := unix.Getsystemcfg(SC_SPLP_STAT) + if impl&SPLPAR_ENABLED > 0 { + return true + } + return false +} + +func DedicatedLpar() bool { + return !SPLparEnabled() +} + +func SPLparCapped() bool { + impl := unix.Getsystemcfg(SC_VCAPW) + if impl == 0 { + return true + } + return false +} + +func SPLparDonating() bool { + impl := unix.Getsystemcfg(SC_SPLP_STAT) + if impl&SPLPAR_DONATE_CAPABLE > 0 { + return true + } + return false +} + +func SmtCapable() bool { + impl := unix.Getsystemcfg(SC_SMT_STAT) + if impl&SMT_CAPABLE > 0 { + return true + } + return false +} + +func SmtEnabled() bool { + impl := unix.Getsystemcfg(SC_SMT_STAT) + if impl&SMT_ENABLE > 0 { + return true + } + return false +} + +func VrmCapable() bool { + impl := unix.Getsystemcfg(SC_VRM_STAT) + if impl&VRM_CAPABLE > 0 { + return true + } + return false +} + +func VrmEnabled() bool { + impl := unix.Getsystemcfg(SC_VRM_STAT) + if impl&VRM_ENABLE > 0 { + return true + } + return false +} + +func AmeEnabled() bool { + impl := unix.Getsystemcfg(SC_AME_STAT) + if impl&AME_ENABLE > 0 { + return true + } + return false +} + +func EcoCapable() bool { + impl := unix.Getsystemcfg(SC_ECO_STAT) + if impl&ECO_CAPABLE > 0 { + return true + } + return false +} + +func EcoEnabled() bool { + impl := unix.Getsystemcfg(SC_ECO_STAT) + if impl&ECO_ENABLE > 0 { + return true + } + return false +} diff --git a/vendor/github.com/power-devops/perfstat/types_cpu.go b/vendor/github.com/power-devops/perfstat/types_cpu.go new file mode 100644 index 
0000000000000..84425e92f5029 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/types_cpu.go @@ -0,0 +1,186 @@ +package perfstat + +type CPU struct { + Name string /* logical processor name (cpu0, cpu1, ..) */ + User int64 /* raw number of clock ticks spent in user mode */ + Sys int64 /* raw number of clock ticks spent in system mode */ + Idle int64 /* raw number of clock ticks spent idle */ + Wait int64 /* raw number of clock ticks spent waiting for I/O */ + PSwitch int64 /* number of context switches (changes of currently running process) */ + Syscall int64 /* number of system calls executed */ + Sysread int64 /* number of read system calls executed */ + Syswrite int64 /* number of write system calls executed */ + Sysfork int64 /* number of fork system call executed */ + Sysexec int64 /* number of exec system call executed */ + Readch int64 /* number of characters tranferred with read system call */ + Writech int64 /* number of characters tranferred with write system call */ + Bread int64 /* number of block reads */ + Bwrite int64 /* number of block writes */ + Lread int64 /* number of logical read requests */ + Lwrite int64 /* number of logical write requests */ + Phread int64 /* number of physical reads (reads on raw device) */ + Phwrite int64 /* number of physical writes (writes on raw device) */ + Iget int64 /* number of inode lookups */ + Namei int64 /* number of vnode lookup from a path name */ + Dirblk int64 /* number of 512-byte block reads by the directory search routine to locate an entry for a file */ + Msg int64 /* number of IPC message operations */ + Sema int64 /* number of IPC semaphore operations */ + MinFaults int64 /* number of page faults with no I/O */ + MajFaults int64 /* number of page faults with disk I/O */ + PUser int64 /* raw number of physical processor tics in user mode */ + PSys int64 /* raw number of physical processor tics in system mode */ + PIdle int64 /* raw number of physical processor tics idle */ + PWait int64 /* raw number of physical processor tics waiting for I/O */ + RedispSD0 int64 /* number of thread redispatches within the scheduler affinity domain 0 */ + RedispSD1 int64 /* number of thread redispatches within the scheduler affinity domain 1 */ + RedispSD2 int64 /* number of thread redispatches within the scheduler affinity domain 2 */ + RedispSD3 int64 /* number of thread redispatches within the scheduler affinity domain 3 */ + RedispSD4 int64 /* number of thread redispatches within the scheduler affinity domain 4 */ + RedispSD5 int64 /* number of thread redispatches within the scheduler affinity domain 5 */ + MigrationPush int64 /* number of thread migrations from the local runque to another queue due to starvation load balancing */ + MigrationS3grq int64 /* number of thread migrations from the global runque to the local runque resulting in a move accross scheduling domain 3 */ + MigrationS3pul int64 /* number of thread migrations from another processor's runque resulting in a move accross scheduling domain 3 */ + InvolCSwitch int64 /* number of involuntary thread context switches */ + VolCSwitch int64 /* number of voluntary thread context switches */ + RunQueue int64 /* number of threads on the runque */ + Bound int64 /* number of bound threads */ + DecrIntrs int64 /* number of decrementer tics interrupts */ + MpcRIntrs int64 /* number of mpc's received interrupts */ + MpcSIntrs int64 /* number of mpc's sent interrupts */ + DevIntrs int64 /* number of device interrupts */ + SoftIntrs int64 /* number of offlevel handlers called */ + 
PhantIntrs int64 /* number of phantom interrupts */ + IdleDonatedPurr int64 /* number of idle cycles donated by a dedicated partition enabled for donation */ + IdleDonatedSpurr int64 /* number of idle spurr cycles donated by a dedicated partition enabled for donation */ + BusyDonatedPurr int64 /* number of busy cycles donated by a dedicated partition enabled for donation */ + BusyDonatedSpurr int64 /* number of busy spurr cycles donated by a dedicated partition enabled for donation */ + IdleStolenPurr int64 /* number of idle cycles stolen by the hypervisor from a dedicated partition */ + IdleStolenSpurr int64 /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */ + BusyStolenPurr int64 /* number of busy cycles stolen by the hypervisor from a dedicated partition */ + BusyStolenSpurr int64 /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */ + Hpi int64 /* number of hypervisor page-ins */ + Hpit int64 /* Time spent in hypervisor page-ins (in nanoseconds)*/ + PUserSpurr int64 /* number of spurr cycles spent in user mode */ + PSysSpurr int64 /* number of spurr cycles spent in kernel mode */ + PIdleSpurr int64 /* number of spurr cycles spent in idle mode */ + PWaitSpurr int64 /* number of spurr cycles spent in wait mode */ + SpurrFlag int32 /* set if running in spurr mode */ + LocalDispatch int64 /* number of local thread dispatches on this logical CPU */ + NearDispatch int64 /* number of near thread dispatches on this logical CPU */ + FarDispatch int64 /* number of far thread dispatches on this logical CPU */ + CSwitches int64 /* Context switches */ + Version int64 /* version number (1, 2, etc.,) */ + TbLast int64 /* timebase counter */ + State int /* Show whether the CPU is offline or online */ + VtbLast int64 /* Last virtual timebase read */ + ICountLast int64 /* Last instruction count read */ +} + +type CPUTotal struct { + NCpus int /* number of active logical processors */ + NCpusCfg int /* number of configured processors */ + Description string /* processor description (type/official name) */ + ProcessorHz int64 /* processor speed in Hz */ + User int64 /* raw total number of clock ticks spent in user mode */ + Sys int64 /* raw total number of clock ticks spent in system mode */ + Idle int64 /* raw total number of clock ticks spent idle */ + Wait int64 /* raw total number of clock ticks spent waiting for I/O */ + PSwitch int64 /* number of process switches (change in currently running process) */ + Syscall int64 /* number of system calls executed */ + Sysread int64 /* number of read system calls executed */ + Syswrite int64 /* number of write system calls executed */ + Sysfork int64 /* number of forks system calls executed */ + Sysexec int64 /* number of execs system calls executed */ + Readch int64 /* number of characters tranferred with read system call */ + Writech int64 /* number of characters tranferred with write system call */ + DevIntrs int64 /* number of device interrupts */ + SoftIntrs int64 /* number of software interrupts */ + Lbolt int64 /* number of ticks since last reboot */ + LoadAvg1 float32 /* times the average number of runnables processes during the last 1, 5 and 15 minutes. */ + LoadAvg5 float32 /* times the average number of runnables processes during the last 1, 5 and 15 minutes. */ + LoadAvg15 float32 /* times the average number of runnables processes during the last 1, 5 and 15 minutes. 
*/ + RunQueue int64 /* length of the run queue (processes ready) */ + SwpQueue int64 /* length of the swap queue (processes waiting to be paged in) */ + Bread int64 /* number of blocks read */ + Bwrite int64 /* number of blocks written */ + Lread int64 /* number of logical read requests */ + Lwrite int64 /* number of logical write requests */ + Phread int64 /* number of physical reads (reads on raw devices) */ + Phwrite int64 /* number of physical writes (writes on raw devices) */ + RunOcc int64 /* updated whenever runque is updated, i.e. the runqueue is occupied. This can be used to compute the simple average of ready processes */ + SwpOcc int64 /* updated whenever swpque is updated. i.e. the swpqueue is occupied. This can be used to compute the simple average processes waiting to be paged in */ + Iget int64 /* number of inode lookups */ + Namei int64 /* number of vnode lookup from a path name */ + Dirblk int64 /* number of 512-byte block reads by the directory search routine to locate an entry for a file */ + Msg int64 /* number of IPC message operations */ + Sema int64 /* number of IPC semaphore operations */ + RcvInt int64 /* number of tty receive interrupts */ + XmtInt int64 /* number of tyy transmit interrupts */ + MdmInt int64 /* number of modem interrupts */ + TtyRawInch int64 /* number of raw input characters */ + TtyCanInch int64 /* number of canonical input characters (always zero) */ + TtyRawOutch int64 /* number of raw output characters */ + Ksched int64 /* number of kernel processes created */ + Koverf int64 /* kernel process creation attempts where: -the user has forked to their maximum limit -the configuration limit of processes has been reached */ + Kexit int64 /* number of kernel processes that became zombies */ + Rbread int64 /* number of remote read requests */ + Rcread int64 /* number of cached remote reads */ + Rbwrt int64 /* number of remote writes */ + Rcwrt int64 /* number of cached remote writes */ + Traps int64 /* number of traps */ + NCpusHigh int64 /* index of highest processor online */ + PUser int64 /* raw number of physical processor tics in user mode */ + PSys int64 /* raw number of physical processor tics in system mode */ + PIdle int64 /* raw number of physical processor tics idle */ + PWait int64 /* raw number of physical processor tics waiting for I/O */ + DecrIntrs int64 /* number of decrementer tics interrupts */ + MpcRIntrs int64 /* number of mpc's received interrupts */ + MpcSIntrs int64 /* number of mpc's sent interrupts */ + PhantIntrs int64 /* number of phantom interrupts */ + IdleDonatedPurr int64 /* number of idle cycles donated by a dedicated partition enabled for donation */ + IdleDonatedSpurr int64 /* number of idle spurr cycles donated by a dedicated partition enabled for donation */ + BusyDonatedPurr int64 /* number of busy cycles donated by a dedicated partition enabled for donation */ + BusyDonatedSpurr int64 /* number of busy spurr cycles donated by a dedicated partition enabled for donation */ + IdleStolenPurr int64 /* number of idle cycles stolen by the hypervisor from a dedicated partition */ + IdleStolenSpurr int64 /* number of idle spurr cycles stolen by the hypervisor from a dedicated partition */ + BusyStolenPurr int64 /* number of busy cycles stolen by the hypervisor from a dedicated partition */ + BusyStolenSpurr int64 /* number of busy spurr cycles stolen by the hypervisor from a dedicated partition */ + IOWait int32 /* number of processes that are asleep waiting for buffered I/O */ + PhysIO int32 /* number of processes 
waiting for raw I/O */ + TWait int64 /* number of threads that are waiting for filesystem direct(cio) */ + Hpi int64 /* number of hypervisor page-ins */ + Hpit int64 /* Time spent in hypervisor page-ins (in nanoseconds) */ + PUserSpurr int64 /* number of spurr cycles spent in user mode */ + PSysSpurr int64 /* number of spurr cycles spent in kernel mode */ + PIdleSpurr int64 /* number of spurr cycles spent in idle mode */ + PWaitSpurr int64 /* number of spurr cycles spent in wait mode */ + SpurrFlag int /* set if running in spurr mode */ + Version int64 /* version number (1, 2, etc.,) */ + TbLast int64 /*time base counter */ + PurrCoalescing int64 /* If the calling partition is authorized to see pool wide statistics then PURR cycles consumed to coalesce data else set to zero.*/ + SpurrCoalescing int64 /* If the calling partition is authorized to see pool wide statistics then SPURR cycles consumed to coalesce data else set to zero. */ +} + +type CPUUtil struct { + Version int64 + CpuID string /* holds the id of the cpu */ + Entitlement float32 /* Partition's entitlement */ + UserPct float32 /* % of utilization in user mode */ + KernPct float32 /* % of utilization in kernel mode */ + IdlePct float32 /* % of utilization in idle mode */ + WaitPct float32 /* % of utilization in wait mode */ + PhysicalBusy float32 /* physical cpus busy */ + PhysicalConsumed float32 /* total cpus consumed by the partition */ + FreqPct float32 /* Average freq% over the last interval */ + EntitlementPct float32 /* % of entitlement used */ + BusyPct float32 /* % of entitlement busy */ + IdleDonatedPct float32 /* % idle cycles donated */ + BusyDonatedPct float32 /* % of busy cycles donated */ + IdleStolenPct float32 /* % idle cycles stolen */ + BusyStolenPct float32 /* % busy cycles stolen */ + LUserPct float32 /* % of utilization in user mode, in terms of logical processor ticks */ + LKernPct float32 /* % of utilization in kernel mode, in terms of logical processor ticks*/ + LIdlePct float32 /* % of utilization in idle mode, in terms of logical processor ticks */ + LWaitPct float32 /* % of utilization in wait mode, in terms of logical processor ticks */ + DeltaTime int64 /* delta time in milliseconds, for which utilization is evaluated */ +} diff --git a/vendor/github.com/power-devops/perfstat/types_disk.go b/vendor/github.com/power-devops/perfstat/types_disk.go new file mode 100644 index 0000000000000..ca1493d872679 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/types_disk.go @@ -0,0 +1,176 @@ +package perfstat + +type DiskTotal struct { + Number int32 /* total number of disks */ + Size int64 /* total size of all disks (in MB) */ + Free int64 /* free portion of all disks (in MB) */ + XRate int64 /* __rxfers: total number of transfers from disk */ + Xfers int64 /* total number of transfers to/from disk */ + Wblks int64 /* 512 bytes blocks written to all disks */ + Rblks int64 /* 512 bytes blocks read from all disks */ + Time int64 /* amount of time disks are active */ + Version int64 /* version number (1, 2, etc.,) */ + Rserv int64 /* Average read or receive service time */ + MinRserv int64 /* min read or receive service time */ + MaxRserv int64 /* max read or receive service time */ + RTimeOut int64 /* number of read request timeouts */ + RFailed int64 /* number of failed read requests */ + Wserv int64 /* Average write or send service time */ + MinWserv int64 /* min write or send service time */ + MaxWserv int64 /* max write or send service time */ + WTimeOut int64 /* number of write request timeouts */ 
+ WFailed int64 /* number of failed write requests */ + WqDepth int64 /* instantaneous wait queue depth (number of requests waiting to be sent to disk) */ + WqTime int64 /* accumulated wait queueing time */ + WqMinTime int64 /* min wait queueing time */ + WqMaxTime int64 /* max wait queueing time */ +} + +// Disk Adapter Types +const ( + DA_SCSI = 0 /* 0 ==> SCSI, SAS, other legacy adapter types */ + DA_VSCSI /* 1 ==> Virtual SCSI/SAS Adapter */ + DA_FCA /* 2 ==> Fiber Channel Adapter */ +) + +type DiskAdapter struct { + Name string /* name of the adapter (from ODM) */ + Description string /* adapter description (from ODM) */ + Number int32 /* number of disks connected to adapter */ + Size int64 /* total size of all disks (in MB) */ + Free int64 /* free portion of all disks (in MB) */ + XRate int64 /* __rxfers: total number of reads via adapter */ + Xfers int64 /* total number of transfers via adapter */ + Rblks int64 /* 512 bytes blocks written via adapter */ + Wblks int64 /* 512 bytes blocks read via adapter */ + Time int64 /* amount of time disks are active */ + Version int64 /* version number (1, 2, etc.,) */ + AdapterType int64 /* 0 ==> SCSI, SAS, other legacy adapter types, 1 ==> Virtual SCSI/SAS Adapter, 2 ==> Fiber Channel Adapter */ + DkBSize int64 /* Number of Bytes in a block for this disk*/ + DkRxfers int64 /* Number of transfers from disk */ + DkRserv int64 /* read or receive service time */ + DkWserv int64 /* write or send service time */ + MinRserv int64 /* Minimum read service time */ + MaxRserv int64 /* Maximum read service time */ + MinWserv int64 /* Minimum Write service time */ + MaxWserv int64 /* Maximum write service time */ + WqDepth int64 /* driver wait queue depth */ + WqSampled int64 /* accumulated sampled dk_wq_depth */ + WqTime int64 /* accumulated wait queueing time */ + WqMinTime int64 /* minimum wait queueing time */ + WqMaxTime int64 /* maximum wait queueing time */ + QFull int64 /* "Service" queue full occurrence count (number of times the adapter/devices connected to the adapter is not accepting any more request) */ + QSampled int64 /* accumulated sampled */ +} + +type Disk struct { + Name string /* name of the disk */ + Description string /* disk description (from ODM) */ + VGName string /* volume group name (from ODM) */ + Size int64 /* size of the disk (in MB) */ + Free int64 /* free portion of the disk (in MB) */ + BSize int64 /* disk block size (in bytes) */ + XRate int64 /* number of transfers from disk */ + Xfers int64 /* number of transfers to/from disk */ + Wblks int64 /* number of blocks written to disk */ + Rblks int64 /* number of blocks read from disk */ + QDepth int64 /* instantaneous "service" queue depth (number of requests sent to disk and not completed yet) */ + Time int64 /* amount of time disk is active */ + Adapter string /* disk adapter name */ + PathsCount int32 /* number of paths to this disk */ + QFull int64 /* "service" queue full occurrence count (number of times the disk is not accepting any more request) */ + Rserv int64 /* read or receive service time */ + RTimeOut int64 /* number of read request timeouts */ + Rfailed int64 /* number of failed read requests */ + MinRserv int64 /* min read or receive service time */ + MaxRserv int64 /* max read or receive service time */ + Wserv int64 /* write or send service time */ + WTimeOut int64 /* number of write request timeouts */ + Wfailed int64 /* number of failed write requests */ + MinWserv int64 /* min write or send service time */ + MaxWserv int64 /* max write or send service time 
*/ + WqDepth int64 /* instantaneous wait queue depth (number of requests waiting to be sent to disk) */ + WqSampled int64 /* accumulated sampled dk_wq_depth */ + WqTime int64 /* accumulated wait queueing time */ + WqMinTime int64 /* min wait queueing time */ + WqMaxTime int64 /* max wait queueing time */ + QSampled int64 /* accumulated sampled dk_q_depth */ + Version int64 /* version number (1, 2, etc.,) */ + PseudoDisk bool /*Indicates whether pseudo or physical disk */ + VTDisk bool /* 1- Virtual Target Disk, 0 - Others */ +} + +type DiskPath struct { + Name string /* name of the path */ + XRate int64 /* __rxfers: number of reads via the path */ + Xfers int64 /* number of transfers via the path */ + Rblks int64 /* 512 bytes blocks written via the path */ + Wblks int64 /* 512 bytes blocks read via the path */ + Time int64 /* amount of time disks are active */ + Adapter string /* disk adapter name (from ODM) */ + QFull int64 /* "service" queue full occurrence count (number of times the disk is not accepting any more request) */ + Rserv int64 /* read or receive service time */ + RTimeOut int64 /* number of read request timeouts */ + Rfailed int64 /* number of failed read requests */ + MinRserv int64 /* min read or receive service time */ + MaxRserv int64 /* max read or receive service time */ + Wserv int64 /* write or send service time */ + WTimeOut int64 /* number of write request timeouts */ + Wfailed int64 /* number of failed write requests */ + MinWserv int64 /* min write or send service time */ + MaxWserv int64 /* max write or send service time */ + WqDepth int64 /* instantaneous wait queue depth (number of requests waiting to be sent to disk) */ + WqSampled int64 /* accumulated sampled dk_wq_depth */ + WqTime int64 /* accumulated wait queueing time */ + WqMinTime int64 /* min wait queueing time */ + WqMaxTime int64 /* max wait queueing time */ + QSampled int64 /* accumulated sampled dk_q_depth */ + Version int64 /* version number (1, 2, etc.,) */ +} + +const ( + FC_DOWN = 0 // FC Adapter state is DOWN + FC_UP = 1 // FC Adapter state is UP +) + +const ( + FCT_FCHBA = 0 // FC type - real Fiber Channel Adapter + FCT_VFC = 1 // FC type - virtual Fiber Channel +) + +type FCAdapter struct { + Version int64 /* version number (1, 2, etc.,) */ + Name string /* name of the adapter */ + State int32 /* FC Adapter state UP or DOWN */ + InputRequests int64 /* Number of Input Requests*/ + OutputRequests int64 /* Number of Output Requests */ + InputBytes int64 /* Number of Input Bytes */ + OutputBytes int64 /* Number of Output Bytes */ + EffMaxTransfer int64 /* Adapter's Effective Maximum Transfer Value */ + NoDMAResourceCnt int64 /* Count of DMA failures due to no DMA Resource available */ + NoCmdResourceCnt int64 /* Count of failures to allocate a command due to no command resource available */ + AttentionType int32 /* Link up or down Indicator */ + SecondsSinceLastReset int64 /* Displays the seconds since last reset of the statistics on the adapter */ + TxFrames int64 /* Number of frames transmitted */ + TxWords int64 /* Fiber Channel Kbytes transmitted */ + RxFrames int64 /* Number of Frames Received */ + RxWords int64 /* Fiber Channel Kbytes Received */ + LIPCount int64 /* Count of LIP (Loop Initialization Protocol) Events received in case we have FC-AL */ + NOSCount int64 /* Count of NOS (Not_Operational) Events. This indicates a link failure state. 
*/ + ErrorFrames int64 /* Number of frames received with the CRC Error */ + DumpedFrames int64 /* Number of lost frames */ + LinkFailureCount int64 /* Count of Link failures */ + LossofSyncCount int64 /* Count of loss of sync */ + LossofSignal int64 /* Count of loss of Signal */ + PrimitiveSeqProtocolErrCount int64 /* number of times a primitive sequence was in error */ + InvalidTxWordCount int64 /* Count of Invalid Transmission words received */ + InvalidCRCCount int64 /* Count of CRC Errors in a Received Frame */ + PortFcId int64 /* SCSI Id of the adapter */ + PortSpeed int64 /* Speed of Adapter in GBIT */ + PortType string /* Type of connection. The Possible Values are Fabric, Private Loop, Point-to-Point, unknown */ + PortWWN int64 /* World Wide Port name */ + PortSupportedSpeed int64 /* Supported Port Speed in GBIT */ + AdapterType int /* 0 - Fiber Chanel, 1 - Virtual Fiber Chanel Adapter */ + VfcName string /* name of the Virtual Fiber Chanel(VFC) adapter */ + ClientPartName string /* name of the client partition */ +} diff --git a/vendor/github.com/power-devops/perfstat/types_fs.go b/vendor/github.com/power-devops/perfstat/types_fs.go new file mode 100644 index 0000000000000..0be048a3844cb --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/types_fs.go @@ -0,0 +1,195 @@ +package perfstat + +import ( + "strings" +) + +type FileSystem struct { + Device string /* name of the mounted device */ + MountPoint string /* where the device is mounted */ + FSType int /* File system type, see the constants below */ + Flags int /* Flags of the file system */ + TotalBlocks int64 /* number of 512 bytes blocks in the filesystem */ + FreeBlocks int64 /* number of free 512 bytes block in the filesystem */ + TotalInodes int64 /* total number of inodes in the filesystem */ + FreeInodes int64 /* number of free inodes in the filesystem */ +} + +func (f *FileSystem) TypeString() string { + switch f.FSType { + case FS_JFS2: + return "jfs2" + case FS_NAMEFS: + return "namefs" + case FS_NFS: + return "nfs" + case FS_JFS: + return "jfs" + case FS_CDROM: + return "cdrfs" + case FS_PROCFS: + return "procfs" + case FS_SFS: + return "sfs" + case FS_CACHEFS: + return "cachefs" + case FS_NFS3: + return "nfs3" + case FS_AUTOFS: + return "autofs" + case FS_POOLFS: + return "poolfs" + case FS_VXFS: + return "vxfs" + case FS_VXODM: + return "vxodm" + case FS_UDF: + return "udfs" + case FS_NFS4: + return "nfs4" + case FS_RFS4: + return "rfs4" + case FS_CIFS: + return "cifs" + case FS_PMEMFS: + return "pmemfs" + case FS_AHAFS: + return "ahafs" + case FS_STNFS: + return "stnfs" + case FS_ASMFS: + return "asmfs" + } + return "unknown" +} + +func (f *FileSystem) FlagsString() string { + var flags []string + + switch { + case f.Flags&VFS_READONLY != 0: + flags = append(flags, "ro") + case f.Flags&VFS_REMOVABLE != 0: + flags = append(flags, "removable") + case f.Flags&VFS_DEVMOUNT != 0: + flags = append(flags, "local") + case f.Flags&VFS_REMOTE != 0: + flags = append(flags, "remote") + case f.Flags&VFS_SYSV_MOUNT != 0: + flags = append(flags, "sysv") + case f.Flags&VFS_UNMOUNTING != 0: + flags = append(flags, "unmounting") + case f.Flags&VFS_NOSUID != 0: + flags = append(flags, "nosuid") + case f.Flags&VFS_NODEV != 0: + flags = append(flags, "nodev") + case f.Flags&VFS_NOINTEG != 0: + flags = append(flags, "nointeg") + case f.Flags&VFS_NOMANAGER != 0: + flags = append(flags, "nomanager") + case f.Flags&VFS_NOCASE != 0: + flags = append(flags, "nocase") + case f.Flags&VFS_UPCASE != 0: + flags = append(flags, "upcase") + 
case f.Flags&VFS_NBC != 0: + flags = append(flags, "nbc") + case f.Flags&VFS_MIND != 0: + flags = append(flags, "mind") + case f.Flags&VFS_RBR != 0: + flags = append(flags, "rbr") + case f.Flags&VFS_RBW != 0: + flags = append(flags, "rbw") + case f.Flags&VFS_DISCONNECTED != 0: + flags = append(flags, "disconnected") + case f.Flags&VFS_SHUTDOWN != 0: + flags = append(flags, "shutdown") + case f.Flags&VFS_VMOUNTOK != 0: + flags = append(flags, "vmountok") + case f.Flags&VFS_SUSER != 0: + flags = append(flags, "suser") + case f.Flags&VFS_SOFT_MOUNT != 0: + flags = append(flags, "soft") + case f.Flags&VFS_UNMOUNTED != 0: + flags = append(flags, "unmounted") + case f.Flags&VFS_DEADMOUNT != 0: + flags = append(flags, "deadmount") + case f.Flags&VFS_SNAPSHOT != 0: + flags = append(flags, "snapshot") + case f.Flags&VFS_VCM_ON != 0: + flags = append(flags, "vcm_on") + case f.Flags&VFS_VCM_MONITOR != 0: + flags = append(flags, "vcm_monitor") + case f.Flags&VFS_ATIMEOFF != 0: + flags = append(flags, "noatime") + case f.Flags&VFS_READMOSTLY != 0: + flags = append(flags, "readmostly") + case f.Flags&VFS_CIOR != 0: + flags = append(flags, "cior") + case f.Flags&VFS_CIO != 0: + flags = append(flags, "cio") + case f.Flags&VFS_DIO != 0: + flags = append(flags, "dio") + } + + return strings.Join(flags, ",") +} + +// Filesystem types +const ( + FS_JFS2 = 0 /* AIX physical fs "jfs2" */ + FS_NAMEFS = 1 /* AIX pseudo fs "namefs" */ + FS_NFS = 2 /* SUN Network File System "nfs" */ + FS_JFS = 3 /* AIX R3 physical fs "jfs" */ + FS_CDROM = 5 /* CDROM File System "cdrom" */ + FS_PROCFS = 6 /* PROCFS File System "proc" */ + FS_SFS = 16 /* AIX Special FS (STREAM mounts) */ + FS_CACHEFS = 17 /* Cachefs file system */ + FS_NFS3 = 18 /* NFSv3 file system */ + FS_AUTOFS = 19 /* Automount file system */ + FS_POOLFS = 20 /* Pool file system */ + FS_VXFS = 32 /* THRPGIO File System "vxfs" */ + FS_VXODM = 33 /* For Veritas File System */ + FS_UDF = 34 /* UDFS file system */ + FS_NFS4 = 35 /* NFSv4 file system */ + FS_RFS4 = 36 /* NFSv4 Pseudo file system */ + FS_CIFS = 37 /* AIX SMBFS (CIFS client) */ + FS_PMEMFS = 38 /* MCR Async Mobility pseudo file system */ + FS_AHAFS = 39 /* AHAFS File System "aha" */ + FS_STNFS = 40 /* Short-Term NFS */ + FS_ASMFS = 41 /* Oracle ASM FS */ +) + +// Filesystem flags +const ( + VFS_READONLY = 0x00000001 /* rdonly access to vfs */ + VFS_REMOVABLE = 0x00000002 /* removable (diskette) media */ + VFS_DEVMOUNT = 0x00000004 /* physical device mount */ + VFS_REMOTE = 0x00000008 /* file system is on network */ + VFS_SYSV_MOUNT = 0x00000010 /* System V style mount */ + VFS_UNMOUNTING = 0x00000020 /* originated by unmount() */ + VFS_NOSUID = 0x00000040 /* don't maintain suid-ness across this mount */ + VFS_NODEV = 0x00000080 /* don't allow device access across this mount */ + VFS_NOINTEG = 0x00000100 /* no integrity mount option */ + VFS_NOMANAGER = 0x00000200 /* mount managed fs w/o manager */ + VFS_NOCASE = 0x00000400 /* do not map dir names */ + VFS_UPCASE = 0x00000800 /* map dir names to uppercase */ + VFS_NBC = 0x00001000 /* NBC cached file in this vfs */ + VFS_MIND = 0x00002000 /* multi-segment .indirect */ + VFS_RBR = 0x00004000 /* Release-behind when reading */ + VFS_RBW = 0x00008000 /* Release-behind when writing */ + VFS_DISCONNECTED = 0x00010000 /* file mount not in use */ + VFS_SHUTDOWN = 0x00020000 /* forced unmount for shutdown */ + VFS_VMOUNTOK = 0x00040000 /* dir/file mnt permission flag */ + VFS_SUSER = 0x00080000 /* client-side suser perm. 
flag */ + VFS_SOFT_MOUNT = 0x00100000 /* file-over-file or directory over directory "soft" mount */ + VFS_UNMOUNTED = 0x00200000 /* unmount completed, stale vnodes are left in the vfs */ + VFS_DEADMOUNT = 0x00400000 /* softmount vfs should be disconnected at last vnode free */ + VFS_SNAPSHOT = 0x00800000 /* snapshot mount */ + VFS_VCM_ON = 0x01000000 /* VCM is currently active */ + VFS_VCM_MONITOR = 0x02000000 /* VCM monitoring is active */ + VFS_ATIMEOFF = 0x04000000 /* no atime updates during i/o */ + VFS_READMOSTLY = 0x10000000 /* ROFS allows open for write */ + VFS_CIOR = 0x20000000 /* O_CIOR mount */ + VFS_CIO = 0x40000000 /* O_CIO mount */ + VFS_DIO = 0x80000000 /* O_DIRECT mount */ +) diff --git a/vendor/github.com/power-devops/perfstat/types_lpar.go b/vendor/github.com/power-devops/perfstat/types_lpar.go new file mode 100644 index 0000000000000..2d3c32fa8cb76 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/types_lpar.go @@ -0,0 +1,68 @@ +package perfstat + +type PartitionType struct { + SmtCapable bool /* OS supports SMT mode */ + SmtEnabled bool /* SMT mode is on */ + LparCapable bool /* OS supports logical partitioning */ + LparEnabled bool /* logical partitioning is on */ + SharedCapable bool /* OS supports shared processor LPAR */ + SharedEnabled bool /* partition runs in shared mode */ + DLparCapable bool /* OS supports dynamic LPAR */ + Capped bool /* partition is capped */ + Kernel64bit bool /* kernel is 64 bit */ + PoolUtilAuthority bool /* pool utilization available */ + DonateCapable bool /* capable of donating cycles */ + DonateEnabled bool /* enabled for donating cycles */ + AmsCapable bool /* 1 = AMS(Active Memory Sharing) capable, 0 = Not AMS capable */ + AmsEnabled bool /* 1 = AMS(Active Memory Sharing) enabled, 0 = Not AMS enabled */ + PowerSave bool /*1= Power saving mode is enabled*/ + AmeEnabled bool /* Active Memory Expansion is enabled */ + SharedExtended bool +} + +type PartitionValue struct { + Online int64 + Max int64 + Min int64 + Desired int64 +} + +type PartitionConfig struct { + Version int64 /* Version number */ + Name string /* Partition Name */ + Node string /* Node Name */ + Conf PartitionType /* Partition Properties */ + Number int32 /* Partition Number */ + GroupID int32 /* Group ID */ + ProcessorFamily string /* Processor Type */ + ProcessorModel string /* Processor Model */ + MachineID string /* Machine ID */ + ProcessorMhz float64 /* Processor Clock Speed in MHz */ + NumProcessors PartitionValue /* Number of Configured Physical Processors in frame*/ + OSName string /* Name of Operating System */ + OSVersion string /* Version of operating System */ + OSBuild string /* Build of Operating System */ + LCpus int32 /* Number of Logical CPUs */ + SmtThreads int32 /* Number of SMT Threads */ + Drives int32 /* Total Number of Drives */ + NetworkAdapters int32 /* Total Number of Network Adapters */ + CpuCap PartitionValue /* Min, Max and Online CPU Capacity */ + Weightage int32 /* Variable Processor Capacity Weightage */ + EntCapacity int32 /* number of processor units this partition is entitled to receive */ + VCpus PartitionValue /* Min, Max and Online Virtual CPUs */ + PoolID int32 /* Shared Pool ID of physical processors, to which this partition belongs*/ + ActiveCpusInPool int32 /* Count of physical CPUs in the shared processor pool, to which this partition belongs */ + PoolWeightage int32 /* Pool Weightage */ + SharedPCpu int32 /* Number of physical processors allocated for shared processor use */ + MaxPoolCap int32 /* Maximum processor 
capacity of partition's pool */ + EntPoolCap int32 /* Entitled processor capacity of partition's pool */ + Mem PartitionValue /* Min, Max and Online Memory */ + MemWeightage int32 /* Variable Memory Capacity Weightage */ + TotalIOMemoryEntitlement int64 /* I/O Memory Entitlement of the partition in bytes */ + MemPoolID int32 /* AMS pool id of the pool the LPAR belongs to */ + HyperPgSize int64 /* Hypervisor page size in KB*/ + ExpMem PartitionValue /* Min, Max and Online Expanded Memory */ + TargetMemExpFactor int64 /* Target Memory Expansion Factor scaled by 100 */ + TargetMemExpSize int64 /* Expanded Memory Size in MB */ + SubProcessorMode int32 /* Split core mode, its value can be 0,1,2 or 4. 0 for unsupported, 1 for capable but not enabled, 2 or 4 for enabled*/ +} diff --git a/vendor/github.com/power-devops/perfstat/types_lvm.go b/vendor/github.com/power-devops/perfstat/types_lvm.go new file mode 100644 index 0000000000000..8f7176a6130f7 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/types_lvm.go @@ -0,0 +1,31 @@ +package perfstat + +type LogicalVolume struct { + Name string /* logical volume name */ + VGName string /* volume group name */ + OpenClose int64 /* LVM_QLVOPEN, etc. (see lvm.h) */ + State int64 /* LVM_UNDEF, etc. (see lvm.h) */ + MirrorPolicy int64 /* LVM_PARALLEL, etc. (see lvm.h) */ + MirrorWriteConsistency int64 /* LVM_CONSIST, etc. (see lvm.h) */ + WriteVerify int64 /* LVM_VERIFY, etc. (see lvm.h) */ + PPsize int64 /* physical partition size in MB */ + LogicalPartitions int64 /* total number of logical paritions configured for this logical volume */ + Mirrors int32 /* number of physical mirrors for each logical partition */ + IOCnt int64 /* Number of read and write requests */ + KBReads int64 /* Number of Kilobytes read */ + KBWrites int64 /* Number of Kilobytes written */ + Version int64 /* version number (1, 2, etc.,) */ +} + +type VolumeGroup struct { + Name string /* volume group name */ + TotalDisks int64 /* number of physical volumes in the volume group */ + ActiveDisks int64 /* number of active physical volumes in the volume group */ + TotalLogicalVolumes int64 /* number of logical volumes in the volume group */ + OpenedLogicalVolumes int64 /* number of logical volumes opened in the volume group */ + IOCnt int64 /* Number of read and write requests */ + KBReads int64 /* Number of Kilobytes read */ + KBWrites int64 /* Number of Kilobytes written */ + Version int64 /* version number (1, 2, etc.,) */ + VariedState int /* Indicates volume group available or not */ +} diff --git a/vendor/github.com/power-devops/perfstat/types_memory.go b/vendor/github.com/power-devops/perfstat/types_memory.go new file mode 100644 index 0000000000000..096d29ad2e96b --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/types_memory.go @@ -0,0 +1,101 @@ +package perfstat + +type MemoryTotal struct { + VirtualTotal int64 /* total virtual memory (in 4KB pages) */ + RealTotal int64 /* total real memory (in 4KB pages) */ + RealFree int64 /* free real memory (in 4KB pages) */ + RealPinned int64 /* real memory which is pinned (in 4KB pages) */ + RealInUse int64 /* real memory which is in use (in 4KB pages) */ + BadPages int64 /* number of bad pages */ + PageFaults int64 /* number of page faults */ + PageIn int64 /* number of pages paged in */ + PageOut int64 /* number of pages paged out */ + PgSpIn int64 /* number of page ins from paging space */ + PgSpOut int64 /* number of page outs from paging space */ + Scans int64 /* number of page scans by clock */ + Cycles int64 /* 
number of page replacement cycles */ + PgSteals int64 /* number of page steals */ + NumPerm int64 /* number of frames used for files (in 4KB pages) */ + PgSpTotal int64 /* total paging space (in 4KB pages) */ + PgSpFree int64 /* free paging space (in 4KB pages) */ + PgSpRsvd int64 /* reserved paging space (in 4KB pages) */ + RealSystem int64 /* real memory used by system segments (in 4KB pages). */ + RealUser int64 /* real memory used by non-system segments (in 4KB pages). */ + RealProcess int64 /* real memory used by process segments (in 4KB pages). */ + VirtualActive int64 /* Active virtual pages. Virtual pages are considered active if they have been accessed */ + IOME int64 /* I/O memory entitlement of the partition in bytes*/ + IOMU int64 /* I/O memory entitlement of the partition in use in bytes*/ + IOHWM int64 /* High water mark of I/O memory entitlement used in bytes*/ + PMem int64 /* Amount of physical mmeory currently backing partition's logical memory in bytes*/ + CompressedTotal int64 /* Total numbers of pages in compressed pool (in 4KB pages) */ + CompressedWSegPg int64 /* Number of compressed working storage pages */ + CPgIn int64 /* number of page ins to compressed pool */ + CPgOut int64 /* number of page outs from compressed pool */ + TrueSize int64 /* True Memory Size in 4KB pages */ + ExpandedMemory int64 /* Expanded Memory Size in 4KB pages */ + CompressedWSegSize int64 /* Total size of the compressed working storage pages in the pool */ + TargetCPoolSize int64 /* Target Compressed Pool Size in bytes */ + MaxCPoolSize int64 /* Max Size of Compressed Pool in bytes */ + MinUCPoolSize int64 /* Min Size of Uncompressed Pool in bytes */ + CPoolSize int64 /* Compressed Pool size in bytes */ + UCPoolSize int64 /* Uncompressed Pool size in bytes */ + CPoolInUse int64 /* Compressed Pool Used in bytes */ + UCPoolInUse int64 /* Uncompressed Pool Used in bytes */ + Version int64 /* version number (1, 2, etc.,) */ + RealAvailable int64 /* number of pages (in 4KB pages) of memory available without paging out working segments */ + BytesCoalesced int64 /* The number of bytes of the calling partition.s logical real memory coalesced because they contained duplicated data */ + BytesCoalescedMemPool int64 /* number of bytes of logical real memory coalesced because they contained duplicated data in the calling partition.s memory */ +} + +type MemoryPage struct { + PSize int64 /* page size in bytes */ + RealTotal int64 /* number of real memory frames of this page size */ + RealFree int64 /* number of pages on free list */ + RealPinned int64 /* number of pages pinned */ + RealInUse int64 /* number of pages in use */ + PgExct int64 /* number of page faults */ + PgIns int64 /* number of pages paged in */ + PgOuts int64 /* number of pages paged out */ + PgSpIns int64 /* number of page ins from paging space */ + PgSpOuts int64 /* number of page outs from paging space */ + Scans int64 /* number of page scans by clock */ + Cycles int64 /* number of page replacement cycles */ + PgSteals int64 /* number of page steals */ + NumPerm int64 /* number of frames used for files */ + NumPgSp int64 /* number of pages with allocated paging space */ + RealSystem int64 /* number of pages used by system segments. */ + RealUser int64 /* number of pages used by non-system segments. */ + RealProcess int64 /* number of pages used by process segments. */ + VirtActive int64 /* Active virtual pages. 
*/ + ComprsdTotal int64 /* Number of pages of this size compressed */ + ComprsdWsegPgs int64 /* Number of compressed working storage pages */ + CPgIns int64 /* number of page ins of this page size to compressed pool */ + CPgOuts int64 /* number of page outs of this page size from compressed pool */ + CPoolInUse int64 /* Compressed Size of this page size in Compressed Pool */ + UCPoolSize int64 /* Uncompressed Pool size in bytes of this page size */ + ComprsdWsegSize int64 /* Total size of the compressed working storage pages in the pool */ + Version int64 /* version number (1, 2, etc.,) */ + RealAvail int64 /* number of pages (in 4KB pages) of memory available without paging out working segments */ +} + +// paging space types +const ( + LV_PAGING = 1 + NFS_PAGING = 2 + UNKNOWN_PAGING = 3 +) + +type PagingSpace struct { + Name string /* Paging space name */ + Type uint8 /* type of paging device (LV_PAGING or NFS_PAGING) */ + VGName string /* volume group name */ + Hostname string /* host name of paging server */ + Filename string /* swap file name on server */ + LPSize int64 /* size in number of logical partitions */ + MBSize int64 /* size in megabytes */ + MBUsed int64 /* portion used in megabytes */ + IOPending int64 /* number of pending I/O */ + Active uint8 /* indicates if active (1 if so, 0 if not) */ + Automatic uint8 /* indicates if automatic (1 if so, 0 if not) */ + Version int64 /* version number (1, 2, etc.,) */ +} diff --git a/vendor/github.com/power-devops/perfstat/types_network.go b/vendor/github.com/power-devops/perfstat/types_network.go new file mode 100644 index 0000000000000..e69d0041d33a9 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/types_network.go @@ -0,0 +1,163 @@ +package perfstat + +// Network Interface types +const ( + IFT_OTHER = 0x1 + IFT_1822 = 0x2 /* old-style arpanet imp */ + IFT_HDH1822 = 0x3 /* HDH arpanet imp */ + IFT_X25DDN = 0x4 /* x25 to imp */ + IFT_X25 = 0x5 /* PDN X25 interface (RFC877) */ + IFT_ETHER = 0x6 /* Ethernet CSMACD */ + IFT_ISO88023 = 0x7 /* CMSA CD */ + IFT_ISO88024 = 0x8 /* Token Bus */ + IFT_ISO88025 = 0x9 /* Token Ring */ + IFT_ISO88026 = 0xa /* MAN */ + IFT_STARLAN = 0xb + IFT_P10 = 0xc /* Proteon 10MBit ring */ + IFT_P80 = 0xd /* Proteon 10MBit ring */ + IFT_HY = 0xe /* Hyperchannel */ + IFT_FDDI = 0xf + IFT_LAPB = 0x10 + IFT_SDLC = 0x11 + IFT_T1 = 0x12 + IFT_CEPT = 0x13 /* E1 - european T1 */ + IFT_ISDNBASIC = 0x14 + IFT_ISDNPRIMARY = 0x15 + IFT_PTPSERIAL = 0x16 /* Proprietary PTP serial */ + IFT_PPP = 0x17 /* RFC 1331 */ + IFT_LOOP = 0x18 /* loopback */ + IFT_EON = 0x19 /* ISO over IP */ + IFT_XETHER = 0x1a /* obsolete 3MB experimental ethernet */ + IFT_NSIP = 0x1b /* XNS over IP */ + IFT_SLIP = 0x1c /* IP over generic TTY */ + IFT_ULTRA = 0x1d /* Ultra Technologies */ + IFT_DS3 = 0x1e /* Generic T3 */ + IFT_SIP = 0x1f /* SMDS */ + IFT_FRELAY = 0x20 /* Frame Relay DTE only */ + IFT_RS232 = 0x21 + IFT_PARA = 0x22 /* parallel-port */ + IFT_ARCNET = 0x23 + IFT_ARCNETPLUS = 0x24 + IFT_ATM = 0x25 /* ATM cells */ + IFT_MIOX25 = 0x26 + IFT_SONET = 0x27 /* SONET or SDH */ + IFT_X25PLE = 0x28 + IFT_ISO88022LLC = 0x29 + IFT_LOCALTALK = 0x2a + IFT_SMDSDXI = 0x2b + IFT_FRELAYDCE = 0x2c /* Frame Relay DCE */ + IFT_V35 = 0x2d + IFT_HSSI = 0x2e + IFT_HIPPI = 0x2f + IFT_MODEM = 0x30 /* Generic Modem */ + IFT_AAL5 = 0x31 /* AAL5 over ATM */ + IFT_SONETPATH = 0x32 + IFT_SONETVT = 0x33 + IFT_SMDSICIP = 0x34 /* SMDS InterCarrier Interface */ + IFT_PROPVIRTUAL = 0x35 /* Proprietary Virtual/internal */ + IFT_PROPMUX = 0x36 /* Proprietary 
Multiplexing */ + IFT_VIPA = 0x37 /* Virtual Interface */ + IFT_SN = 0x38 /* Federation Switch */ + IFT_SP = 0x39 /* SP switch */ + IFT_FCS = 0x3a /* IP over Fiber Channel */ + IFT_TUNNEL = 0x3b + IFT_GIFTUNNEL = 0x3c /* IPv4 over IPv6 tunnel */ + IFT_HF = 0x3d /* Support for PERCS HFI*/ + IFT_CLUSTER = 0x3e /* cluster pseudo network interface */ + IFT_FB = 0xc7 /* IP over Infiniband. Number by IANA */ +) + +type NetIfaceTotal struct { + Number int32 /* number of network interfaces */ + IPackets int64 /* number of packets received on interface */ + IBytes int64 /* number of bytes received on interface */ + IErrors int64 /* number of input errors on interface */ + OPackets int64 /* number of packets sent on interface */ + OBytes int64 /* number of bytes sent on interface */ + OErrors int64 /* number of output errors on interface */ + Collisions int64 /* number of collisions on csma interface */ + XmitDrops int64 /* number of packets not transmitted */ + Version int64 /* version number (1, 2, etc.,) */ +} + +type NetIface struct { + Name string /* name of the interface */ + Description string /* interface description (from ODM, similar to lscfg output) */ + Type uint8 /* ethernet, tokenring, etc. interpretation can be done using /usr/include/net/if_types.h */ + MTU int64 /* network frame size */ + IPackets int64 /* number of packets received on interface */ + IBytes int64 /* number of bytes received on interface */ + IErrors int64 /* number of input errors on interface */ + OPackets int64 /* number of packets sent on interface */ + OBytes int64 /* number of bytes sent on interface */ + OErrors int64 /* number of output errors on interface */ + Collisions int64 /* number of collisions on csma interface */ + Bitrate int64 /* adapter rating in bit per second */ + XmitDrops int64 /* number of packets not transmitted */ + Version int64 /* version number (1, 2, etc.,) */ + IfIqDrops int64 /* Dropped on input, this interface */ + IfArpDrops int64 /* Dropped because no arp response */ +} + +type NetBuffer struct { + Name string /* size in ascii, always power of 2 (ex: "32", "64", "128") */ + InUse int64 /* number of buffer currently allocated */ + Calls int64 /* number of buffer allocations since last reset */ + Delayed int64 /* number of delayed allocations */ + Free int64 /* number of free calls */ + Failed int64 /* number of failed allocations */ + HighWatermark int64 /* high threshold for number of buffer allocated */ + Freed int64 /* number of buffers freed */ + Version int64 /* version number (1, 2, etc.,) */ +} + +// Network adapter types +const ( + NET_PHY = 0 /* physical device */ + NET_SEA = 1 /* shared ethernet adapter */ + NET_VIR = 2 /* virtual device */ + NET_HEA = 3 /* host ethernet adapter */ + NET_EC = 4 /* etherchannel */ + NET_VLAN = 5 /* vlan pseudo device */ +) + +type NetAdapter struct { + Version int64 /* version number (1,2, etc) */ + Name string /* name of the adapter */ + TxPackets int64 /* Transmit Packets on interface */ + TxBytes int64 /* Transmit Bytes on interface */ + TxInterrupts int64 /* Transfer Interrupts */ + TxErrors int64 /* Transmit Errors */ + TxPacketsDropped int64 /* Packets Dropped at the time of Data Transmission */ + TxQueueSize int64 /* Maximum Packets on Software Transmit Queue */ + TxQueueLen int64 /* Transmission Queue Length */ + TxQueueOverflow int64 /* Transmission Queue Overflow */ + TxBroadcastPackets int64 /* Number of Broadcast Packets Transmitted */ + TxMulticastPackets int64 /* Number of Multicast packets Transmitted */ + TxCarrierSense 
int64 /* Lost Carrier Sense signal count */ + TxDMAUnderrun int64 /* Count of DMA Under-runs for Transmission */ + TxLostCTSErrors int64 /* The number of unsuccessful transmissions due to the loss of the Clear-to-Send signal error */ + TxMaxCollisionErrors int64 /* Maximum Collision Errors at Transmission */ + TxLateCollisionErrors int64 /* Late Collision Errors at Transmission */ + TxDeferred int64 /* The number of packets deferred for Transmission. */ + TxTimeoutErrors int64 /* Time Out Errors for Transmission */ + TxSingleCollisionCount int64 /* Count of Single Collision error at Transmission */ + TxMultipleCollisionCount int64 /* Count of Multiple Collision error at Transmission */ + RxPackets int64 /* Receive Packets on interface */ + RxBytes int64 /* Receive Bytes on interface */ + RxInterrupts int64 /* Receive Interrupts */ + RxErrors int64 /* Input errors on interface */ + RxPacketsDropped int64 /* The number of packets accepted by the device driver for transmission which were not (for any reason) given to the device. */ + RxBadPackets int64 /* Count of Bad Packets Received. */ + RxMulticastPackets int64 /* Number of MultiCast Packets Received */ + RxBroadcastPackets int64 /* Number of Broadcast Packets Received */ + RxCRCErrors int64 /* Count of Packets Received with CRC errors */ + RxDMAOverrun int64 /* Count of DMA over-runs for Data Receival. */ + RxAlignmentErrors int64 /* Packets Received with Alignment Error */ + RxNoResourceErrors int64 /* Packets Received with No Resource Errors */ + RxCollisionErrors int64 /* Packets Received with Collision errors */ + RxPacketTooShortErrors int64 /* Count of Short Packets Received. */ + RxPacketTooLongErrors int64 /* Count of Too Long Packets Received. */ + RxPacketDiscardedByAdapter int64 /* Count of Received Packets discarded by Adapter. */ + AdapterType int32 /* 0 - Physical, 1 - SEA, 2 - Virtual, 3 -HEA */ +} diff --git a/vendor/github.com/power-devops/perfstat/types_process.go b/vendor/github.com/power-devops/perfstat/types_process.go new file mode 100644 index 0000000000000..325c70b077553 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/types_process.go @@ -0,0 +1,43 @@ +package perfstat + +type Process struct { + Version int64 /* version number (1, 2, etc.,) */ + PID int64 /* Process ID */ + ProcessName string /* Name of The Process */ + Priority int32 /* Process Priority */ + NumThreads int64 /* Thread Count */ + UID int64 /* Owner Info */ + ClassID int64 /* WLM Class Name */ + Size int64 /* Virtual Size of the Process in KB(Exclusive Usage, Leaving all Shared Library Text & Shared File Pages, Shared Memory, Memory Mapped) */ + RealMemData int64 /* Real Memory used for Data in KB */ + RealMemText int64 /* Real Memory used for Text in KB */ + VirtMemData int64 /* Virtual Memory used to Data in KB */ + VirtMemText int64 /* Virtual Memory used for Text in KB */ + SharedLibDataSize int64 /* Data Size from Shared Library in KB */ + HeapSize int64 /* Heap Size in KB */ + RealInUse int64 /* The Real memory in use(in KB) by the process including all kind of segments (excluding system segments). This includes Text, Data, Shared Library Text, Shared Library Data, File Pages, Shared Memory & Memory Mapped */ + VirtInUse int64 /* The Virtual memory in use(in KB) by the process including all kind of segments (excluding system segments). 
This includes Text, Data, Shared Library Text, Shared Library Data, File Pages, Shared Memory & Memory Mapped */ + Pinned int64 /* Pinned Memory(in KB) for this process inclusive of all segments */ + PgSpInUse int64 /* Paging Space used(in KB) inclusive of all segments */ + FilePages int64 /* File Pages used(in KB) including shared pages */ + RealInUseMap int64 /* Real memory used(in KB) for Shared Memory and Memory Mapped regions */ + VirtInUseMap int64 /* Virtual Memory used(in KB) for Shared Memory and Memory Mapped regions */ + PinnedInUseMap int64 /* Pinned memory(in KB) for Shared Memory and Memory Mapped regions */ + UCpuTime float64 /* User Mode CPU time will be in percentage or milliseconds based on, whether it is filled by perfstat_process_util or perfstat_process respectively. */ + SCpuTime float64 /* System Mode CPU time will be in percentage or milliseconds based on, whether it is filled by perfstat_process_util or perfstat_process respectively. */ + LastTimeBase int64 /* Timebase Counter */ + InBytes int64 /* Bytes Read from Disk */ + OutBytes int64 /* Bytes Written to Disk */ + InOps int64 /* In Operations from Disk */ + OutOps int64 /* Out Operations from Disk */ +} + +type Thread struct { + TID int64 /* thread identifier */ + PID int64 /* process identifier */ + CpuID int64 /* processor on which I'm bound */ + UCpuTime float64 /* User Mode CPU time will be in percentage or milliseconds based on, whether it is filled by perfstat_thread_util or perfstat_thread respectively. */ + SCpuTime float64 /* System Mode CPU time will be in percentage or milliseconds based on, whether it is filled by perfstat_thread_util or perfstat_thread respectively. */ + LastTimeBase int64 /* Timebase Counter */ + Version int64 +} diff --git a/vendor/github.com/power-devops/perfstat/uptime.go b/vendor/github.com/power-devops/perfstat/uptime.go new file mode 100644 index 0000000000000..2bd3e568d2df4 --- /dev/null +++ b/vendor/github.com/power-devops/perfstat/uptime.go @@ -0,0 +1,35 @@ +// +build aix + +package perfstat + +/* +#include "c_helpers.h" +*/ +import "C" + +import ( + "fmt" + "time" +) + +func timeSince(ts uint64) uint64 { + return uint64(time.Now().Unix()) - ts +} + +// BootTime() returns the time of the last boot in UNIX seconds +func BootTime() (uint64, error) { + sec := C.boottime() + if sec == -1 { + return 0, fmt.Errorf("Can't determine boot time") + } + return uint64(sec), nil +} + +// UptimeSeconds() calculates uptime in seconds +func UptimeSeconds() (uint64, error) { + boot, err := BootTime() + if err != nil { + return 0, err + } + return timeSince(boot), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/LICENSE b/vendor/github.com/shirou/gopsutil/v4/LICENSE new file mode 100644 index 0000000000000..6f06adcbff34d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/LICENSE @@ -0,0 +1,61 @@ +gopsutil is distributed under BSD license reproduced below. + +Copyright (c) 2014, WAKAYAMA Shirou +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of the gopsutil authors nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +------- +internal/common/binary.go in the gopsutil is copied and modified from golang/encoding/binary.go. + + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/shirou/gopsutil/v4/common/env.go b/vendor/github.com/shirou/gopsutil/v4/common/env.go new file mode 100644 index 0000000000000..4acad1fd1e8a4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/common/env.go @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: BSD-3-Clause +package common + +type EnvKeyType string + +// EnvKey is a context key that can be used to set programmatically the environment +// gopsutil relies on to perform calls against the OS. 
+// Example of use: +// +// ctx := context.WithValue(context.Background(), common.EnvKey, EnvMap{common.HostProcEnvKey: "/myproc"}) +// avg, err := load.AvgWithContext(ctx) +var EnvKey = EnvKeyType("env") + +const ( + HostProcEnvKey EnvKeyType = "HOST_PROC" + HostSysEnvKey EnvKeyType = "HOST_SYS" + HostEtcEnvKey EnvKeyType = "HOST_ETC" + HostVarEnvKey EnvKeyType = "HOST_VAR" + HostRunEnvKey EnvKeyType = "HOST_RUN" + HostDevEnvKey EnvKeyType = "HOST_DEV" + HostRootEnvKey EnvKeyType = "HOST_ROOT" +) + +type EnvMap map[EnvKeyType]string diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go new file mode 100644 index 0000000000000..56f53c3a1accd --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu.go @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +import ( + "context" + "encoding/json" + "fmt" + "math" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// TimesStat contains the amounts of time the CPU has spent performing different +// kinds of work. Time units are in seconds. It is based on linux /proc/stat file. +type TimesStat struct { + CPU string `json:"cpu"` + User float64 `json:"user"` + System float64 `json:"system"` + Idle float64 `json:"idle"` + Nice float64 `json:"nice"` + Iowait float64 `json:"iowait"` + Irq float64 `json:"irq"` + Softirq float64 `json:"softirq"` + Steal float64 `json:"steal"` + Guest float64 `json:"guest"` + GuestNice float64 `json:"guestNice"` +} + +type InfoStat struct { + CPU int32 `json:"cpu"` + VendorID string `json:"vendorId"` + Family string `json:"family"` + Model string `json:"model"` + Stepping int32 `json:"stepping"` + PhysicalID string `json:"physicalId"` + CoreID string `json:"coreId"` + Cores int32 `json:"cores"` + ModelName string `json:"modelName"` + Mhz float64 `json:"mhz"` + CacheSize int32 `json:"cacheSize"` + Flags []string `json:"flags"` + Microcode string `json:"microcode"` +} + +type lastPercent struct { + sync.Mutex + lastCPUTimes []TimesStat + lastPerCPUTimes []TimesStat +} + +var ( + lastCPUPercent lastPercent + invoke common.Invoker = common.Invoke{} +) + +func init() { + lastCPUPercent.Lock() + lastCPUPercent.lastCPUTimes, _ = Times(false) + lastCPUPercent.lastPerCPUTimes, _ = Times(true) + lastCPUPercent.Unlock() +} + +// Counts returns the number of physical or logical cores in the system +func Counts(logical bool) (int, error) { + return CountsWithContext(context.Background(), logical) +} + +func (c TimesStat) String() string { + v := []string{ + `"cpu":"` + c.CPU + `"`, + `"user":` + strconv.FormatFloat(c.User, 'f', 1, 64), + `"system":` + strconv.FormatFloat(c.System, 'f', 1, 64), + `"idle":` + strconv.FormatFloat(c.Idle, 'f', 1, 64), + `"nice":` + strconv.FormatFloat(c.Nice, 'f', 1, 64), + `"iowait":` + strconv.FormatFloat(c.Iowait, 'f', 1, 64), + `"irq":` + strconv.FormatFloat(c.Irq, 'f', 1, 64), + `"softirq":` + strconv.FormatFloat(c.Softirq, 'f', 1, 64), + `"steal":` + strconv.FormatFloat(c.Steal, 'f', 1, 64), + `"guest":` + strconv.FormatFloat(c.Guest, 'f', 1, 64), + `"guestNice":` + strconv.FormatFloat(c.GuestNice, 'f', 1, 64), + } + + return `{` + strings.Join(v, ",") + `}` +} + +// Deprecated: Total returns the total number of seconds in a CPUTimesStat +// Please do not use this internal function. 
+func (c TimesStat) Total() float64 { + total := c.User + c.System + c.Idle + c.Nice + c.Iowait + c.Irq + + c.Softirq + c.Steal + c.Guest + c.GuestNice + + return total +} + +func (c InfoStat) String() string { + s, _ := json.Marshal(c) + return string(s) +} + +func getAllBusy(t TimesStat) (float64, float64) { + tot := t.Total() + if runtime.GOOS == "linux" { + tot -= t.Guest // Linux 2.6.24+ + tot -= t.GuestNice // Linux 3.2.0+ + } + + busy := tot - t.Idle - t.Iowait + + return tot, busy +} + +func calculateBusy(t1, t2 TimesStat) float64 { + t1All, t1Busy := getAllBusy(t1) + t2All, t2Busy := getAllBusy(t2) + + if t2Busy <= t1Busy { + return 0 + } + if t2All <= t1All { + return 100 + } + return math.Min(100, math.Max(0, (t2Busy-t1Busy)/(t2All-t1All)*100)) +} + +func calculateAllBusy(t1, t2 []TimesStat) ([]float64, error) { + // Make sure the CPU measurements have the same length. + if len(t1) != len(t2) { + return nil, fmt.Errorf( + "received two CPU counts: %d != %d", + len(t1), len(t2), + ) + } + + ret := make([]float64, len(t1)) + for i, t := range t2 { + ret[i] = calculateBusy(t1[i], t) + } + return ret, nil +} + +// Percent calculates the percentage of cpu used either per CPU or combined. +// If an interval of 0 is given it will compare the current cpu times against the last call. +// Returns one value per cpu, or a single value if percpu is set to false. +func Percent(interval time.Duration, percpu bool) ([]float64, error) { + return PercentWithContext(context.Background(), interval, percpu) +} + +func PercentWithContext(ctx context.Context, interval time.Duration, percpu bool) ([]float64, error) { + if interval <= 0 { + return percentUsedFromLastCallWithContext(ctx, percpu) + } + + // Get CPU usage at the start of the interval. + cpuTimes1, err := TimesWithContext(ctx, percpu) + if err != nil { + return nil, err + } + + if err := common.Sleep(ctx, interval); err != nil { + return nil, err + } + + // And at the end of the interval. + cpuTimes2, err := TimesWithContext(ctx, percpu) + if err != nil { + return nil, err + } + + return calculateAllBusy(cpuTimes1, cpuTimes2) +} + +func percentUsedFromLastCall(percpu bool) ([]float64, error) { + return percentUsedFromLastCallWithContext(context.Background(), percpu) +} + +func percentUsedFromLastCallWithContext(ctx context.Context, percpu bool) ([]float64, error) { + cpuTimes, err := TimesWithContext(ctx, percpu) + if err != nil { + return nil, err + } + lastCPUPercent.Lock() + defer lastCPUPercent.Unlock() + var lastTimes []TimesStat + if percpu { + lastTimes = lastCPUPercent.lastPerCPUTimes + lastCPUPercent.lastPerCPUTimes = cpuTimes + } else { + lastTimes = lastCPUPercent.lastCPUTimes + lastCPUPercent.lastCPUTimes = cpuTimes + } + + if lastTimes == nil { + return nil, fmt.Errorf("error getting times for cpu percent. 
lastTimes was nil") + } + return calculateAllBusy(lastTimes, cpuTimes) +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go new file mode 100644 index 0000000000000..bc766bd4fe9a4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix + +package cpu + +import ( + "context" +) + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go new file mode 100644 index 0000000000000..559dc5feafda9 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_cgo.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix && cgo + +package cpu + +import ( + "context" + + "github.com/power-devops/perfstat" +) + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + var ret []TimesStat + if percpu { + cpus, err := perfstat.CpuStat() + if err != nil { + return nil, err + } + for _, c := range cpus { + ct := &TimesStat{ + CPU: c.Name, + Idle: float64(c.Idle), + User: float64(c.User), + System: float64(c.Sys), + Iowait: float64(c.Wait), + } + ret = append(ret, *ct) + } + } else { + c, err := perfstat.CpuUtilTotalStat() + if err != nil { + return nil, err + } + ct := &TimesStat{ + CPU: "cpu-total", + Idle: float64(c.IdlePct), + User: float64(c.UserPct), + System: float64(c.KernPct), + Iowait: float64(c.WaitPct), + } + ret = append(ret, *ct) + } + return ret, nil +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + c, err := perfstat.CpuTotalStat() + if err != nil { + return nil, err + } + info := InfoStat{ + CPU: 0, + Mhz: float64(c.ProcessorHz / 1000000), + Cores: int32(c.NCpusCfg), + } + result := []InfoStat{info} + return result, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + c, err := perfstat.CpuTotalStat() + if err != nil { + return 0, err + } + return c.NCpusCfg, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go new file mode 100644 index 0000000000000..51e295a2bccba --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_aix_nocgo.go @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix && !cgo + +package cpu + +import ( + "context" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + if percpu { + return []TimesStat{}, common.ErrNotImplementedError + } else { + out, err := invoke.CommandWithContext(ctx, "sar", "-u", "10", "1") + if err != nil { + return nil, err + } + lines := strings.Split(string(out), "\n") + if len(lines) < 5 { + return []TimesStat{}, common.ErrNotImplementedError + } + + ret := TimesStat{CPU: "cpu-total"} + h := strings.Fields(lines[len(lines)-3]) // headers + v := strings.Fields(lines[len(lines)-2]) // values + for i, header := range h { + if t, err := strconv.ParseFloat(v[i], 64); err == nil { + switch header { + case `%usr`: + ret.User = t + case `%sys`: + ret.System = t + case `%wio`: + ret.Iowait = t + case `%idle`: + ret.Idle = t + } + } + } + + return []TimesStat{ret}, nil + } +} + +func InfoWithContext(ctx 
context.Context) ([]InfoStat, error) { + out, err := invoke.CommandWithContext(ctx, "prtconf") + if err != nil { + return nil, err + } + + ret := InfoStat{} + for _, line := range strings.Split(string(out), "\n") { + if strings.HasPrefix(line, "Number Of Processors:") { + p := strings.Fields(line) + if len(p) > 3 { + if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { + ret.Cores = int32(t) + } + } + } else if strings.HasPrefix(line, "Processor Clock Speed:") { + p := strings.Fields(line) + if len(p) > 4 { + if t, err := strconv.ParseFloat(p[3], 64); err == nil { + switch strings.ToUpper(p[4]) { + case "MHZ": + ret.Mhz = t + case "GHZ": + ret.Mhz = t * 1000.0 + case "KHZ": + ret.Mhz = t / 1000.0 + default: + ret.Mhz = t + } + } + } + break + } + } + return []InfoStat{ret}, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + info, err := InfoWithContext(ctx) + if err == nil { + return int(info[0].Cores), nil + } + return 0, err +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go new file mode 100644 index 0000000000000..79a458b8e21cc --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin.go @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package cpu + +import ( + "context" + "strconv" + "strings" + + "github.com/shoenig/go-m1cpu" + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" +) + +// sys/resource.h +const ( + CPUser = 0 + cpNice = 1 + cpSys = 2 + cpIntr = 3 + cpIdle = 4 + cpUStates = 5 +) + +// default value. from time.h +var ClocksPerSec = float64(128) + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + if percpu { + return perCPUTimes() + } + + return allCPUTimes() +} + +// Returns only one CPUInfoStat on FreeBSD +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + + c := InfoStat{} + c.ModelName, _ = unix.Sysctl("machdep.cpu.brand_string") + family, _ := unix.SysctlUint32("machdep.cpu.family") + c.Family = strconv.FormatUint(uint64(family), 10) + model, _ := unix.SysctlUint32("machdep.cpu.model") + c.Model = strconv.FormatUint(uint64(model), 10) + stepping, _ := unix.SysctlUint32("machdep.cpu.stepping") + c.Stepping = int32(stepping) + features, err := unix.Sysctl("machdep.cpu.features") + if err == nil { + for _, v := range strings.Fields(features) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + leaf7Features, err := unix.Sysctl("machdep.cpu.leaf7_features") + if err == nil { + for _, v := range strings.Fields(leaf7Features) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + extfeatures, err := unix.Sysctl("machdep.cpu.extfeatures") + if err == nil { + for _, v := range strings.Fields(extfeatures) { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + cores, _ := unix.SysctlUint32("machdep.cpu.core_count") + c.Cores = int32(cores) + cacheSize, _ := unix.SysctlUint32("machdep.cpu.cache.size") + c.CacheSize = int32(cacheSize) + c.VendorID, _ = unix.Sysctl("machdep.cpu.vendor") + + if m1cpu.IsAppleSilicon() { + c.Mhz = float64(m1cpu.PCoreHz() / 1_000_000) + } else { + // Use the 
rated frequency of the CPU. This is a static value and does not + // account for low power or Turbo Boost modes. + cpuFrequency, err := unix.SysctlUint64("hw.cpufrequency") + if err == nil { + c.Mhz = float64(cpuFrequency) / 1000000.0 + } + } + + return append(ret, c), nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + var cpuArgument string + if logical { + cpuArgument = "hw.logicalcpu" + } else { + cpuArgument = "hw.physicalcpu" + } + + count, err := unix.SysctlUint32(cpuArgument) + if err != nil { + return 0, err + } + + return int(count), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_cgo.go new file mode 100644 index 0000000000000..3a02024c5be46 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_cgo.go @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && cgo + +package cpu + +/* +#include +#include +#include +#include +#include +#include +#include +#if TARGET_OS_MAC +#include +#endif +#include +#include +*/ +import "C" + +import ( + "bytes" + "encoding/binary" + "fmt" + "unsafe" +) + +// these CPU times for darwin is borrowed from influxdb/telegraf. + +func perCPUTimes() ([]TimesStat, error) { + var ( + count C.mach_msg_type_number_t + cpuload *C.processor_cpu_load_info_data_t + ncpu C.natural_t + ) + + status := C.host_processor_info(C.host_t(C.mach_host_self()), + C.PROCESSOR_CPU_LOAD_INFO, + &ncpu, + (*C.processor_info_array_t)(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_processor_info error=%d", status) + } + + // jump through some cgo casting hoops and ensure we properly free + // the memory that cpuload points to + target := C.vm_map_t(C.mach_task_self_) + address := C.vm_address_t(uintptr(unsafe.Pointer(cpuload))) + defer C.vm_deallocate(target, address, C.vm_size_t(ncpu)) + + // the body of struct processor_cpu_load_info + // aka processor_cpu_load_info_data_t + var cpu_ticks [C.CPU_STATE_MAX]uint32 + + // copy the cpuload array to a []byte buffer + // where we can binary.Read the data + size := int(ncpu) * binary.Size(cpu_ticks) + buf := (*[1 << 30]byte)(unsafe.Pointer(cpuload))[:size:size] + + bbuf := bytes.NewBuffer(buf) + + var ret []TimesStat + + for i := 0; i < int(ncpu); i++ { + err := binary.Read(bbuf, binary.LittleEndian, &cpu_ticks) + if err != nil { + return nil, err + } + + c := TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, + System: float64(cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, + Nice: float64(cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, + Idle: float64(cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, + } + + ret = append(ret, c) + } + + return ret, nil +} + +func allCPUTimes() ([]TimesStat, error) { + var count C.mach_msg_type_number_t + var cpuload C.host_cpu_load_info_data_t + + count = C.HOST_CPU_LOAD_INFO_COUNT + + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_CPU_LOAD_INFO, + C.host_info_t(unsafe.Pointer(&cpuload)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + c := TimesStat{ + CPU: "cpu-total", + User: float64(cpuload.cpu_ticks[C.CPU_STATE_USER]) / ClocksPerSec, + System: float64(cpuload.cpu_ticks[C.CPU_STATE_SYSTEM]) / ClocksPerSec, + Nice: float64(cpuload.cpu_ticks[C.CPU_STATE_NICE]) / ClocksPerSec, + Idle: float64(cpuload.cpu_ticks[C.CPU_STATE_IDLE]) / ClocksPerSec, + } + + return 
[]TimesStat{c}, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_nocgo.go new file mode 100644 index 0000000000000..1af8566a67bef --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_darwin_nocgo.go @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && !cgo + +package cpu + +import "github.com/shirou/gopsutil/v4/internal/common" + +func perCPUTimes() ([]TimesStat, error) { + return []TimesStat{}, common.ErrNotImplementedError +} + +func allCPUTimes() ([]TimesStat, error) { + return []TimesStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go new file mode 100644 index 0000000000000..19b1e9dd3ef08 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly.go @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +import ( + "context" + "fmt" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" +) + +var ( + ClocksPerSec = float64(128) + cpuMatch = regexp.MustCompile(`^CPU:`) + originMatch = regexp.MustCompile(`Origin\s*=\s*"(.+)"\s+Id\s*=\s*(.+)\s+Stepping\s*=\s*(.+)`) + featuresMatch = regexp.MustCompile(`Features=.+<(.+)>`) + featuresMatch2 = regexp.MustCompile(`Features2=[a-f\dx]+<(.+)>`) + cpuEnd = regexp.MustCompile(`^Trying to mount root`) + cpuTimesSize int + emptyTimes cpuTimes +) + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func timeStat(name string, t *cpuTimes) *TimesStat { + return &TimesStat{ + User: float64(t.User) / ClocksPerSec, + Nice: float64(t.Nice) / ClocksPerSec, + System: float64(t.Sys) / ClocksPerSec, + Idle: float64(t.Idle) / ClocksPerSec, + Irq: float64(t.Intr) / ClocksPerSec, + CPU: name, + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + if percpu { + buf, err := unix.SysctlRaw("kern.cp_times") + if err != nil { + return nil, err + } + + // We can't do this in init due to the conflict with cpu.init() + if cpuTimesSize == 0 { + cpuTimesSize = int(reflect.TypeOf(cpuTimes{}).Size()) + } + + ncpus := len(buf) / cpuTimesSize + ret := make([]TimesStat, 0, ncpus) + for i := 0; i < ncpus; i++ { + times := (*cpuTimes)(unsafe.Pointer(&buf[i*cpuTimesSize])) + if *times == emptyTimes { + // CPU not present + continue + } + ret = append(ret, *timeStat(fmt.Sprintf("cpu%d", len(ret)), times)) + } + return ret, nil + } + + buf, err := unix.SysctlRaw("kern.cp_time") + if err != nil { + return nil, err + } + + times := (*cpuTimes)(unsafe.Pointer(&buf[0])) + return []TimesStat{*timeStat("cpu-total", times)}, nil +} + +// Returns only one InfoStat on DragonflyBSD. The information regarding core +// count, however is accurate and it is assumed that all InfoStat attributes +// are the same across CPUs. 
+func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + const dmesgBoot = "/var/run/dmesg.boot" + + c, err := parseDmesgBoot(dmesgBoot) + if err != nil { + return nil, err + } + + var u32 uint32 + if u32, err = unix.SysctlUint32("hw.clockrate"); err != nil { + return nil, err + } + c.Mhz = float64(u32) + + var num int + var buf string + if buf, err = unix.Sysctl("hw.cpu_topology.tree"); err != nil { + return nil, err + } + num = strings.Count(buf, "CHIP") + c.Cores = int32(strings.Count(string(buf), "CORE") / num) + + if c.ModelName, err = unix.Sysctl("hw.model"); err != nil { + return nil, err + } + + ret := make([]InfoStat, num) + for i := 0; i < num; i++ { + ret[i] = c + } + + return ret, nil +} + +func parseDmesgBoot(fileName string) (InfoStat, error) { + c := InfoStat{} + lines, _ := common.ReadLines(fileName) + for _, line := range lines { + if matches := cpuEnd.FindStringSubmatch(line); matches != nil { + break + } else if matches := originMatch.FindStringSubmatch(line); matches != nil { + c.VendorID = matches[1] + t, err := strconv.ParseInt(matches[2], 10, 32) + if err != nil { + return c, fmt.Errorf("unable to parse DragonflyBSD CPU stepping information from %q: %v", line, err) + } + c.Stepping = int32(t) + } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil { + for _, v := range strings.Split(matches[1], ",") { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } else if matches := featuresMatch2.FindStringSubmatch(line); matches != nil { + for _, v := range strings.Split(matches[1], ",") { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } + } + + return c, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go new file mode 100644 index 0000000000000..25ececa680e00 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_dragonfly_amd64.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go new file mode 100644 index 0000000000000..245c1ec98b8ac --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_fallback.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build !darwin && !linux && !freebsd && !openbsd && !netbsd && !solaris && !windows && !dragonfly && !plan9 && !aix + +package cpu + +import ( + "context" + "runtime" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + return []TimesStat{}, common.ErrNotImplementedError +} + +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + return []InfoStat{}, common.ErrNotImplementedError +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go new file mode 
100644 index 0000000000000..c68d6bff0f647 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd.go @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +import ( + "context" + "fmt" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" +) + +var ( + ClocksPerSec = float64(128) + cpuMatch = regexp.MustCompile(`^CPU:`) + originMatch = regexp.MustCompile(`Origin\s*=\s*"(.+)"\s+Id\s*=\s*(.+)\s+Family\s*=\s*(.+)\s+Model\s*=\s*(.+)\s+Stepping\s*=\s*(.+)`) + featuresMatch = regexp.MustCompile(`Features=.+<(.+)>`) + featuresMatch2 = regexp.MustCompile(`Features2=[a-f\dx]+<(.+)>`) + cpuEnd = regexp.MustCompile(`^Trying to mount root`) + cpuCores = regexp.MustCompile(`FreeBSD/SMP: (\d*) package\(s\) x (\d*) core\(s\)`) + cpuTimesSize int + emptyTimes cpuTimes +) + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func timeStat(name string, t *cpuTimes) *TimesStat { + return &TimesStat{ + User: float64(t.User) / ClocksPerSec, + Nice: float64(t.Nice) / ClocksPerSec, + System: float64(t.Sys) / ClocksPerSec, + Idle: float64(t.Idle) / ClocksPerSec, + Irq: float64(t.Intr) / ClocksPerSec, + CPU: name, + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + if percpu { + buf, err := unix.SysctlRaw("kern.cp_times") + if err != nil { + return nil, err + } + + // We can't do this in init due to the conflict with cpu.init() + if cpuTimesSize == 0 { + cpuTimesSize = int(reflect.TypeOf(cpuTimes{}).Size()) + } + + ncpus := len(buf) / cpuTimesSize + ret := make([]TimesStat, 0, ncpus) + for i := 0; i < ncpus; i++ { + times := (*cpuTimes)(unsafe.Pointer(&buf[i*cpuTimesSize])) + if *times == emptyTimes { + // CPU not present + continue + } + ret = append(ret, *timeStat(fmt.Sprintf("cpu%d", len(ret)), times)) + } + return ret, nil + } + + buf, err := unix.SysctlRaw("kern.cp_time") + if err != nil { + return nil, err + } + + times := (*cpuTimes)(unsafe.Pointer(&buf[0])) + return []TimesStat{*timeStat("cpu-total", times)}, nil +} + +// Returns only one InfoStat on FreeBSD. The information regarding core +// count, however is accurate and it is assumed that all InfoStat attributes +// are the same across CPUs. 
+func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + const dmesgBoot = "/var/run/dmesg.boot" + + c, num, err := parseDmesgBoot(dmesgBoot) + if err != nil { + return nil, err + } + + var u32 uint32 + if u32, err = unix.SysctlUint32("hw.clockrate"); err != nil { + return nil, err + } + c.Mhz = float64(u32) + + if u32, err = unix.SysctlUint32("hw.ncpu"); err != nil { + return nil, err + } + c.Cores = int32(u32) + + if c.ModelName, err = unix.Sysctl("hw.model"); err != nil { + return nil, err + } + + ret := make([]InfoStat, num) + for i := 0; i < num; i++ { + ret[i] = c + } + + return ret, nil +} + +func parseDmesgBoot(fileName string) (InfoStat, int, error) { + c := InfoStat{} + lines, _ := common.ReadLines(fileName) + cpuNum := 1 // default cpu num is 1 + for _, line := range lines { + if matches := cpuEnd.FindStringSubmatch(line); matches != nil { + break + } else if matches := originMatch.FindStringSubmatch(line); matches != nil { + c.VendorID = matches[1] + c.Family = matches[3] + c.Model = matches[4] + t, err := strconv.ParseInt(matches[5], 10, 32) + if err != nil { + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU stepping information from %q: %v", line, err) + } + c.Stepping = int32(t) + } else if matches := featuresMatch.FindStringSubmatch(line); matches != nil { + for _, v := range strings.Split(matches[1], ",") { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } else if matches := featuresMatch2.FindStringSubmatch(line); matches != nil { + for _, v := range strings.Split(matches[1], ",") { + c.Flags = append(c.Flags, strings.ToLower(v)) + } + } else if matches := cpuCores.FindStringSubmatch(line); matches != nil { + t, err := strconv.ParseInt(matches[1], 10, 32) + if err != nil { + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU Nums from %q: %v", line, err) + } + cpuNum = int(t) + t2, err := strconv.ParseInt(matches[2], 10, 32) + if err != nil { + return c, 0, fmt.Errorf("unable to parse FreeBSD CPU cores from %q: %v", line, err) + } + c.Cores = int32(t2) + } + } + + return c, cpuNum, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go new file mode 100644 index 0000000000000..e4799bcf5c4a1 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_386.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go new file mode 100644 index 0000000000000..25ececa680e00 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_amd64.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go new file mode 100644 index 0000000000000..e4799bcf5c4a1 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys 
uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go new file mode 100644 index 0000000000000..25ececa680e00 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_freebsd_arm64.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go new file mode 100644 index 0000000000000..f78c61a25b621 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_linux.go @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux + +package cpu + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "strconv" + "strings" + + "github.com/tklauser/go-sysconf" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +var ClocksPerSec = float64(100) + +var armModelToModelName = map[uint64]string{ + 0x810: "ARM810", + 0x920: "ARM920", + 0x922: "ARM922", + 0x926: "ARM926", + 0x940: "ARM940", + 0x946: "ARM946", + 0x966: "ARM966", + 0xa20: "ARM1020", + 0xa22: "ARM1022", + 0xa26: "ARM1026", + 0xb02: "ARM11 MPCore", + 0xb36: "ARM1136", + 0xb56: "ARM1156", + 0xb76: "ARM1176", + 0xc05: "Cortex-A5", + 0xc07: "Cortex-A7", + 0xc08: "Cortex-A8", + 0xc09: "Cortex-A9", + 0xc0d: "Cortex-A17", + 0xc0f: "Cortex-A15", + 0xc0e: "Cortex-A17", + 0xc14: "Cortex-R4", + 0xc15: "Cortex-R5", + 0xc17: "Cortex-R7", + 0xc18: "Cortex-R8", + 0xc20: "Cortex-M0", + 0xc21: "Cortex-M1", + 0xc23: "Cortex-M3", + 0xc24: "Cortex-M4", + 0xc27: "Cortex-M7", + 0xc60: "Cortex-M0+", + 0xd01: "Cortex-A32", + 0xd02: "Cortex-A34", + 0xd03: "Cortex-A53", + 0xd04: "Cortex-A35", + 0xd05: "Cortex-A55", + 0xd06: "Cortex-A65", + 0xd07: "Cortex-A57", + 0xd08: "Cortex-A72", + 0xd09: "Cortex-A73", + 0xd0a: "Cortex-A75", + 0xd0b: "Cortex-A76", + 0xd0c: "Neoverse-N1", + 0xd0d: "Cortex-A77", + 0xd0e: "Cortex-A76AE", + 0xd13: "Cortex-R52", + 0xd20: "Cortex-M23", + 0xd21: "Cortex-M33", + 0xd40: "Neoverse-V1", + 0xd41: "Cortex-A78", + 0xd42: "Cortex-A78AE", + 0xd43: "Cortex-A65AE", + 0xd44: "Cortex-X1", + 0xd46: "Cortex-A510", + 0xd47: "Cortex-A710", + 0xd48: "Cortex-X2", + 0xd49: "Neoverse-N2", + 0xd4a: "Neoverse-E1", + 0xd4b: "Cortex-A78C", + 0xd4c: "Cortex-X1C", + 0xd4d: "Cortex-A715", + 0xd4e: "Cortex-X3", +} + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + filename := common.HostProcWithContext(ctx, "stat") + lines := []string{} + if percpu { + statlines, err := common.ReadLines(filename) + if err != nil || len(statlines) < 2 { + return []TimesStat{}, nil + } + for _, line := range statlines[1:] { + if !strings.HasPrefix(line, "cpu") { + break + } + lines = append(lines, line) + } + } else { + lines, _ = common.ReadLinesOffsetN(filename, 0, 1) + } + + ret := make([]TimesStat, 0, len(lines)) + + for _, line := range lines { + ct, err := parseStatLine(line) + if err != nil { + continue + } + ret = append(ret, *ct) + + } + return ret, nil +} + +func sysCPUPath(ctx context.Context, cpu int32, relPath string) string { + return common.HostSysWithContext(ctx, 
fmt.Sprintf("devices/system/cpu/cpu%d", cpu), relPath) +} + +func finishCPUInfo(ctx context.Context, c *InfoStat) { + var lines []string + var err error + var value float64 + + if len(c.CoreID) == 0 { + lines, err = common.ReadLines(sysCPUPath(ctx, c.CPU, "topology/core_id")) + if err == nil { + c.CoreID = lines[0] + } + } + + // override the value of c.Mhz with cpufreq/cpuinfo_max_freq regardless + // of the value from /proc/cpuinfo because we want to report the maximum + // clock-speed of the CPU for c.Mhz, matching the behaviour of Windows + lines, err = common.ReadLines(sysCPUPath(ctx, c.CPU, "cpufreq/cpuinfo_max_freq")) + // if we encounter errors below such as there are no cpuinfo_max_freq file, + // we just ignore. so let Mhz is 0. + if err != nil || len(lines) == 0 { + return + } + value, err = strconv.ParseFloat(lines[0], 64) + if err != nil { + return + } + c.Mhz = value / 1000.0 // value is in kHz + if c.Mhz > 9999 { + c.Mhz = c.Mhz / 1000.0 // value in Hz + } +} + +// CPUInfo on linux will return 1 item per physical thread. +// +// CPUs have three levels of counting: sockets, cores, threads. +// Cores with HyperThreading count as having 2 threads per core. +// Sockets often come with many physical CPU cores. +// For example a single socket board with two cores each with HT will +// return 4 CPUInfoStat structs on Linux and the "Cores" field set to 1. +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + filename := common.HostProcWithContext(ctx, "cpuinfo") + lines, _ := common.ReadLines(filename) + + var ret []InfoStat + var processorName string + + c := InfoStat{CPU: -1, Cores: 1} + for _, line := range lines { + fields := strings.Split(line, ":") + if len(fields) < 2 { + continue + } + key := strings.TrimSpace(fields[0]) + value := strings.TrimSpace(fields[1]) + + switch key { + case "Processor": + processorName = value + case "processor", "cpu number": + if c.CPU >= 0 { + finishCPUInfo(ctx, &c) + ret = append(ret, c) + } + c = InfoStat{Cores: 1, ModelName: processorName} + t, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return ret, err + } + c.CPU = int32(t) + case "vendorId", "vendor_id": + c.VendorID = value + if strings.Contains(value, "S390") { + processorName = "S390" + } + case "CPU implementer": + if v, err := strconv.ParseUint(value, 0, 8); err == nil { + switch v { + case 0x41: + c.VendorID = "ARM" + case 0x42: + c.VendorID = "Broadcom" + case 0x43: + c.VendorID = "Cavium" + case 0x44: + c.VendorID = "DEC" + case 0x46: + c.VendorID = "Fujitsu" + case 0x48: + c.VendorID = "HiSilicon" + case 0x49: + c.VendorID = "Infineon" + case 0x4d: + c.VendorID = "Motorola/Freescale" + case 0x4e: + c.VendorID = "NVIDIA" + case 0x50: + c.VendorID = "APM" + case 0x51: + c.VendorID = "Qualcomm" + case 0x56: + c.VendorID = "Marvell" + case 0x61: + c.VendorID = "Apple" + case 0x69: + c.VendorID = "Intel" + case 0xc0: + c.VendorID = "Ampere" + } + } + case "cpu family": + c.Family = value + case "model", "CPU part": + c.Model = value + // if CPU is arm based, model name is found via model number. 
refer to: arch/arm64/kernel/cpuinfo.c + if c.VendorID == "ARM" { + if v, err := strconv.ParseUint(c.Model, 0, 16); err == nil { + modelName, exist := armModelToModelName[v] + if exist { + c.ModelName = modelName + } else { + c.ModelName = "Undefined" + } + } + } + case "Model Name", "model name", "cpu": + c.ModelName = value + if strings.Contains(value, "POWER") { + c.Model = strings.Split(value, " ")[0] + c.Family = "POWER" + c.VendorID = "IBM" + } + case "stepping", "revision", "CPU revision": + val := value + + if key == "revision" { + val = strings.Split(value, ".")[0] + } + + t, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return ret, err + } + c.Stepping = int32(t) + case "cpu MHz", "clock", "cpu MHz dynamic": + // treat this as the fallback value, thus we ignore error + if t, err := strconv.ParseFloat(strings.Replace(value, "MHz", "", 1), 64); err == nil { + c.Mhz = t + } + case "cache size": + t, err := strconv.ParseInt(strings.Replace(value, " KB", "", 1), 10, 64) + if err != nil { + return ret, err + } + c.CacheSize = int32(t) + case "physical id": + c.PhysicalID = value + case "core id": + c.CoreID = value + case "flags", "Features": + c.Flags = strings.FieldsFunc(value, func(r rune) bool { + return r == ',' || r == ' ' + }) + case "microcode": + c.Microcode = value + } + } + if c.CPU >= 0 { + finishCPUInfo(ctx, &c) + ret = append(ret, c) + } + return ret, nil +} + +func parseStatLine(line string) (*TimesStat, error) { + fields := strings.Fields(line) + + if len(fields) < 8 { + return nil, errors.New("stat does not contain cpu info") + } + + if !strings.HasPrefix(fields[0], "cpu") { + return nil, errors.New("not contain cpu") + } + + cpu := fields[0] + if cpu == "cpu" { + cpu = "cpu-total" + } + user, err := strconv.ParseFloat(fields[1], 64) + if err != nil { + return nil, err + } + nice, err := strconv.ParseFloat(fields[2], 64) + if err != nil { + return nil, err + } + system, err := strconv.ParseFloat(fields[3], 64) + if err != nil { + return nil, err + } + idle, err := strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, err + } + iowait, err := strconv.ParseFloat(fields[5], 64) + if err != nil { + return nil, err + } + irq, err := strconv.ParseFloat(fields[6], 64) + if err != nil { + return nil, err + } + softirq, err := strconv.ParseFloat(fields[7], 64) + if err != nil { + return nil, err + } + + ct := &TimesStat{ + CPU: cpu, + User: user / ClocksPerSec, + Nice: nice / ClocksPerSec, + System: system / ClocksPerSec, + Idle: idle / ClocksPerSec, + Iowait: iowait / ClocksPerSec, + Irq: irq / ClocksPerSec, + Softirq: softirq / ClocksPerSec, + } + if len(fields) > 8 { // Linux >= 2.6.11 + steal, err := strconv.ParseFloat(fields[8], 64) + if err != nil { + return nil, err + } + ct.Steal = steal / ClocksPerSec + } + if len(fields) > 9 { // Linux >= 2.6.24 + guest, err := strconv.ParseFloat(fields[9], 64) + if err != nil { + return nil, err + } + ct.Guest = guest / ClocksPerSec + } + if len(fields) > 10 { // Linux >= 3.2.0 + guestNice, err := strconv.ParseFloat(fields[10], 64) + if err != nil { + return nil, err + } + ct.GuestNice = guestNice / ClocksPerSec + } + + return ct, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + if logical { + ret := 0 + // https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_pslinux.py#L599 + procCpuinfo := common.HostProcWithContext(ctx, "cpuinfo") + lines, err := common.ReadLines(procCpuinfo) + if err == nil { + for _, line := range lines { + line = 
strings.ToLower(line) + if strings.HasPrefix(line, "processor") { + _, err = strconv.Atoi(strings.TrimSpace(line[strings.IndexByte(line, ':')+1:])) + if err == nil { + ret++ + } + } + } + } + if ret == 0 { + procStat := common.HostProcWithContext(ctx, "stat") + lines, err = common.ReadLines(procStat) + if err != nil { + return 0, err + } + for _, line := range lines { + if len(line) >= 4 && strings.HasPrefix(line, "cpu") && '0' <= line[3] && line[3] <= '9' { // `^cpu\d` regexp matching + ret++ + } + } + } + return ret, nil + } + // physical cores + // https://github.com/giampaolo/psutil/blob/8415355c8badc9c94418b19bdf26e622f06f0cce/psutil/_pslinux.py#L615-L628 + threadSiblingsLists := make(map[string]bool) + // These 2 files are the same but */core_cpus_list is newer while */thread_siblings_list is deprecated and may disappear in the future. + // https://www.kernel.org/doc/Documentation/admin-guide/cputopology.rst + // https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964 + // https://lkml.org/lkml/2019/2/26/41 + for _, glob := range []string{"devices/system/cpu/cpu[0-9]*/topology/core_cpus_list", "devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list"} { + if files, err := filepath.Glob(common.HostSysWithContext(ctx, glob)); err == nil { + for _, file := range files { + lines, err := common.ReadLines(file) + if err != nil || len(lines) != 1 { + continue + } + threadSiblingsLists[lines[0]] = true + } + ret := len(threadSiblingsLists) + if ret != 0 { + return ret, nil + } + } + } + // https://github.com/giampaolo/psutil/blob/122174a10b75c9beebe15f6c07dcf3afbe3b120d/psutil/_pslinux.py#L631-L652 + filename := common.HostProcWithContext(ctx, "cpuinfo") + lines, err := common.ReadLines(filename) + if err != nil { + return 0, err + } + mapping := make(map[int]int) + currentInfo := make(map[string]int) + for _, line := range lines { + line = strings.ToLower(strings.TrimSpace(line)) + if line == "" { + // new section + id, okID := currentInfo["physical id"] + cores, okCores := currentInfo["cpu cores"] + if okID && okCores { + mapping[id] = cores + } + currentInfo = make(map[string]int) + continue + } + fields := strings.Split(line, ":") + if len(fields) < 2 { + continue + } + fields[0] = strings.TrimSpace(fields[0]) + if fields[0] == "physical id" || fields[0] == "cpu cores" { + val, err := strconv.Atoi(strings.TrimSpace(fields[1])) + if err != nil { + continue + } + currentInfo[fields[0]] = val + } + } + ret := 0 + for _, v := range mapping { + ret += v + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go new file mode 100644 index 0000000000000..2cda5cd24375e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd.go @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build netbsd + +package cpu + +import ( + "context" + "fmt" + "runtime" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" +) + +const ( + // sys/sysctl.h + ctlKern = 1 // "high kernel": proc, limits + ctlHw = 6 // CTL_HW + kernCpTime = 51 // KERN_CPTIME +) + +var ClocksPerSec = float64(100) + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) (ret 
[]TimesStat, err error) { + if !percpu { + mib := []int32{ctlKern, kernCpTime} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return ret, err + } + times := (*cpuTimes)(unsafe.Pointer(&buf[0])) + stat := TimesStat{ + CPU: "cpu-total", + User: float64(times.User), + Nice: float64(times.Nice), + System: float64(times.Sys), + Idle: float64(times.Idle), + Irq: float64(times.Intr), + } + return []TimesStat{stat}, nil + } + + ncpu, err := unix.SysctlUint32("hw.ncpu") + if err != nil { + return + } + + var i uint32 + for i = 0; i < ncpu; i++ { + mib := []int32{ctlKern, kernCpTime, int32(i)} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return ret, err + } + + stats := (*cpuTimes)(unsafe.Pointer(&buf[0])) + ret = append(ret, TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(stats.User), + Nice: float64(stats.Nice), + System: float64(stats.Sys), + Idle: float64(stats.Idle), + Irq: float64(stats.Intr), + }) + } + + return ret, nil +} + +// Returns only one (minimal) CPUInfoStat on NetBSD +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + var err error + + c := InfoStat{} + + mhz, err := unix.Sysctl("machdep.dmi.processor-frequency") + if err != nil { + return nil, err + } + _, err = fmt.Sscanf(mhz, "%f", &c.Mhz) + if err != nil { + return nil, err + } + + ncpu, err := unix.SysctlUint32("hw.ncpuonline") + if err != nil { + return nil, err + } + c.Cores = int32(ncpu) + + if c.ModelName, err = unix.Sysctl("machdep.dmi.processor-version"); err != nil { + return nil, err + } + + return append(ret, c), nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go new file mode 100644 index 0000000000000..25ececa680e00 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_amd64.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go new file mode 100644 index 0000000000000..25ececa680e00 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_netbsd_arm64.go @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go new file mode 100644 index 0000000000000..33233d3c74aa2 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd.go @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd + +package cpu + +import ( + "context" + "fmt" + "runtime" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" +) + +const ( + // sys/sched.h + cpuOnline = 0x0001 // CPUSTATS_ONLINE + + // sys/sysctl.h + ctlKern = 1 // "high kernel": proc, limits + ctlHw = 6 // CTL_HW + smt = 24 // HW_SMT + kernCpTime = 40 // KERN_CPTIME + kernCPUStats = 85 // KERN_CPUSTATS +) + +var ClocksPerSec = float64(128) + +type cpuStats struct { + // cs_time[CPUSTATES] + User uint64 + 
Nice uint64 + Sys uint64 + Spin uint64 + Intr uint64 + Idle uint64 + + // cs_flags + Flags uint64 +} + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) (ret []TimesStat, err error) { + if !percpu { + mib := []int32{ctlKern, kernCpTime} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return ret, err + } + times := (*cpuTimes)(unsafe.Pointer(&buf[0])) + stat := TimesStat{ + CPU: "cpu-total", + User: float64(times.User) / ClocksPerSec, + Nice: float64(times.Nice) / ClocksPerSec, + System: float64(times.Sys) / ClocksPerSec, + Idle: float64(times.Idle) / ClocksPerSec, + Irq: float64(times.Intr) / ClocksPerSec, + } + return []TimesStat{stat}, nil + } + + ncpu, err := unix.SysctlUint32("hw.ncpu") + if err != nil { + return + } + + var i uint32 + for i = 0; i < ncpu; i++ { + mib := []int32{ctlKern, kernCPUStats, int32(i)} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return ret, err + } + + stats := (*cpuStats)(unsafe.Pointer(&buf[0])) + if (stats.Flags & cpuOnline) == 0 { + continue + } + ret = append(ret, TimesStat{ + CPU: fmt.Sprintf("cpu%d", i), + User: float64(stats.User) / ClocksPerSec, + Nice: float64(stats.Nice) / ClocksPerSec, + System: float64(stats.Sys) / ClocksPerSec, + Idle: float64(stats.Idle) / ClocksPerSec, + Irq: float64(stats.Intr) / ClocksPerSec, + }) + } + + return ret, nil +} + +// Returns only one (minimal) CPUInfoStat on OpenBSD +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + var err error + + c := InfoStat{} + + mhz, err := unix.SysctlUint32("hw.cpuspeed") + if err != nil { + return nil, err + } + c.Mhz = float64(mhz) + + ncpu, err := unix.SysctlUint32("hw.ncpuonline") + if err != nil { + return nil, err + } + c.Cores = int32(ncpu) + + if c.ModelName, err = unix.Sysctl("hw.model"); err != nil { + return nil, err + } + + return append(ret, c), nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go new file mode 100644 index 0000000000000..40a6f43e4980c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_386.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Spin uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go new file mode 100644 index 0000000000000..464156d54025a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_amd64.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Spin uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go new file mode 100644 index 0000000000000..40a6f43e4980c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm.go @@ -0,0 +1,11 @@ +// 
SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint32 + Nice uint32 + Sys uint32 + Spin uint32 + Intr uint32 + Idle uint32 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go new file mode 100644 index 0000000000000..464156d54025a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_arm64.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Spin uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go new file mode 100644 index 0000000000000..464156d54025a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_openbsd_riscv64.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +type cpuTimes struct { + User uint64 + Nice uint64 + Sys uint64 + Spin uint64 + Intr uint64 + Idle uint64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go new file mode 100644 index 0000000000000..bff2e0c7584d4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_plan9.go @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build plan9 + +package cpu + +import ( + "context" + "os" + "runtime" + + stats "github.com/lufia/plan9stats" + "github.com/shirou/gopsutil/v4/internal/common" +) + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + // BUG: percpu flag is not supported yet. 
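+ // Readings come from the lufia/plan9stats package, using the HOST_ROOT
+ // environment variable as the root directory for the stat files.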
+ root := os.Getenv("HOST_ROOT") + c, err := stats.ReadCPUType(ctx, stats.WithRootDir(root)) + if err != nil { + return nil, err + } + s, err := stats.ReadCPUStats(ctx, stats.WithRootDir(root)) + if err != nil { + return nil, err + } + return []TimesStat{ + { + CPU: c.Name, + User: s.User.Seconds(), + System: s.Sys.Seconds(), + Idle: s.Idle.Seconds(), + }, + }, nil +} + +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + return []InfoStat{}, common.ErrNotImplementedError +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go new file mode 100644 index 0000000000000..d8ba1d3242e5f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_solaris.go @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: BSD-3-Clause +package cpu + +import ( + "context" + "errors" + "fmt" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + + "github.com/tklauser/go-sysconf" +) + +var ClocksPerSec = float64(128) + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + ClocksPerSec = float64(clkTck) + } +} + +// sum all values in a float64 map with float64 keys +func msum(x map[float64]float64) float64 { + total := 0.0 + for _, y := range x { + total += y + } + return total +} + +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +var kstatSplit = regexp.MustCompile(`[:\s]+`) + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-p", "cpu_stat:*:*:/^idle$|^user$|^kernel$|^iowait$|^swap$/") + if err != nil { + return nil, fmt.Errorf("cannot execute kstat: %s", err) + } + cpu := make(map[float64]float64) + idle := make(map[float64]float64) + user := make(map[float64]float64) + kern := make(map[float64]float64) + iowt := make(map[float64]float64) + // swap := make(map[float64]float64) + for _, line := range strings.Split(string(kstatSysOut), "\n") { + fields := kstatSplit.Split(line, -1) + if fields[0] != "cpu_stat" { + continue + } + cpuNumber, err := strconv.ParseFloat(fields[1], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse cpu number: %s", err) + } + cpu[cpuNumber] = cpuNumber + switch fields[3] { + case "idle": + idle[cpuNumber], err = strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse idle: %s", err) + } + case "user": + user[cpuNumber], err = strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse user: %s", err) + } + case "kernel": + kern[cpuNumber], err = strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse kernel: %s", err) + } + case "iowait": + iowt[cpuNumber], err = strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse iowait: %s", err) + } + //not sure how this translates, don't report, add to kernel, something else? 
+ /*case "swap": + swap[cpuNumber], err = strconv.ParseFloat(fields[4], 64) + if err != nil { + return nil, fmt.Errorf("cannot parse swap: %s", err) + } */ + } + } + ret := make([]TimesStat, 0, len(cpu)) + if percpu { + for _, c := range cpu { + ct := &TimesStat{ + CPU: fmt.Sprintf("cpu%d", int(cpu[c])), + Idle: idle[c] / ClocksPerSec, + User: user[c] / ClocksPerSec, + System: kern[c] / ClocksPerSec, + Iowait: iowt[c] / ClocksPerSec, + } + ret = append(ret, *ct) + } + } else { + ct := &TimesStat{ + CPU: "cpu-total", + Idle: msum(idle) / ClocksPerSec, + User: msum(user) / ClocksPerSec, + System: msum(kern) / ClocksPerSec, + Iowait: msum(iowt) / ClocksPerSec, + } + ret = append(ret, *ct) + } + return ret, nil +} + +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + psrInfoOut, err := invoke.CommandWithContext(ctx, "psrinfo", "-p", "-v") + if err != nil { + return nil, fmt.Errorf("cannot execute psrinfo: %s", err) + } + + procs, err := parseProcessorInfo(string(psrInfoOut)) + if err != nil { + return nil, fmt.Errorf("error parsing psrinfo output: %s", err) + } + + isaInfoOut, err := invoke.CommandWithContext(ctx, "isainfo", "-b", "-v") + if err != nil { + return nil, fmt.Errorf("cannot execute isainfo: %s", err) + } + + flags, err := parseISAInfo(string(isaInfoOut)) + if err != nil { + return nil, fmt.Errorf("error parsing isainfo output: %s", err) + } + + result := make([]InfoStat, 0, len(flags)) + for _, proc := range procs { + procWithFlags := proc + procWithFlags.Flags = flags + result = append(result, procWithFlags) + } + + return result, nil +} + +var flagsMatch = regexp.MustCompile(`[\w\.]+`) + +func parseISAInfo(cmdOutput string) ([]string, error) { + words := flagsMatch.FindAllString(cmdOutput, -1) + + // Sanity check the output + if len(words) < 4 || words[1] != "bit" || words[3] != "applications" { + return nil, errors.New("attempted to parse invalid isainfo output") + } + + flags := make([]string, len(words)-4) + for i, val := range words[4:] { + flags[i] = val + } + sort.Strings(flags) + + return flags, nil +} + +var psrInfoMatch = regexp.MustCompile(`The physical processor has (?:([\d]+) virtual processors? 
\(([\d-]+)\)|([\d]+) cores and ([\d]+) virtual processors[^\n]+)\n(?:\s+ The core has.+\n)*\s+.+ \((\w+) ([\S]+) family (.+) model (.+) step (.+) clock (.+) MHz\)\n[\s]*(.*)`) + +const ( + psrNumCoresOffset = 1 + psrNumCoresHTOffset = 3 + psrNumHTOffset = 4 + psrVendorIDOffset = 5 + psrFamilyOffset = 7 + psrModelOffset = 8 + psrStepOffset = 9 + psrClockOffset = 10 + psrModelNameOffset = 11 +) + +func parseProcessorInfo(cmdOutput string) ([]InfoStat, error) { + matches := psrInfoMatch.FindAllStringSubmatch(cmdOutput, -1) + + var infoStatCount int32 + result := make([]InfoStat, 0, len(matches)) + for physicalIndex, physicalCPU := range matches { + var step int32 + var clock float64 + + if physicalCPU[psrStepOffset] != "" { + stepParsed, err := strconv.ParseInt(physicalCPU[psrStepOffset], 10, 32) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for step as 32-bit integer: %s", physicalCPU[9], err) + } + step = int32(stepParsed) + } + + if physicalCPU[psrClockOffset] != "" { + clockParsed, err := strconv.ParseInt(physicalCPU[psrClockOffset], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for clock as 32-bit integer: %s", physicalCPU[10], err) + } + clock = float64(clockParsed) + } + + var err error + var numCores int64 + var numHT int64 + switch { + case physicalCPU[psrNumCoresOffset] != "": + numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresOffset], 10, 32) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[1], err) + } + + for i := 0; i < int(numCores); i++ { + result = append(result, InfoStat{ + CPU: infoStatCount, + PhysicalID: strconv.Itoa(physicalIndex), + CoreID: strconv.Itoa(i), + Cores: 1, + VendorID: physicalCPU[psrVendorIDOffset], + ModelName: physicalCPU[psrModelNameOffset], + Family: physicalCPU[psrFamilyOffset], + Model: physicalCPU[psrModelOffset], + Stepping: step, + Mhz: clock, + }) + infoStatCount++ + } + case physicalCPU[psrNumCoresHTOffset] != "": + numCores, err = strconv.ParseInt(physicalCPU[psrNumCoresHTOffset], 10, 32) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for core count as 32-bit integer: %s", physicalCPU[3], err) + } + + numHT, err = strconv.ParseInt(physicalCPU[psrNumHTOffset], 10, 32) + if err != nil { + return nil, fmt.Errorf("cannot parse value %q for hyperthread count as 32-bit integer: %s", physicalCPU[4], err) + } + + for i := 0; i < int(numCores); i++ { + result = append(result, InfoStat{ + CPU: infoStatCount, + PhysicalID: strconv.Itoa(physicalIndex), + CoreID: strconv.Itoa(i), + Cores: int32(numHT) / int32(numCores), + VendorID: physicalCPU[psrVendorIDOffset], + ModelName: physicalCPU[psrModelNameOffset], + Family: physicalCPU[psrFamilyOffset], + Model: physicalCPU[psrModelOffset], + Stepping: step, + Mhz: clock, + }) + infoStatCount++ + } + default: + return nil, errors.New("values for cores with and without hyperthreading are both set") + } + } + return result, nil +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + return runtime.NumCPU(), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go new file mode 100644 index 0000000000000..4476b91cb5f4b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/cpu/cpu_windows.go @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package cpu + +import ( + "context" + "fmt" + "unsafe" + + 
"github.com/shirou/gopsutil/v4/internal/common" + "github.com/yusufpapurcu/wmi" + "golang.org/x/sys/windows" +) + +var procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + +type win32_Processor struct { + Family uint16 + Manufacturer string + Name string + NumberOfLogicalProcessors uint32 + NumberOfCores uint32 + ProcessorID *string + Stepping *string + MaxClockSpeed uint32 +} + +// SYSTEM_PROCESSOR_PERFORMANCE_INFORMATION +// defined in windows api doc with the following +// https://docs.microsoft.com/en-us/windows/desktop/api/winternl/nf-winternl-ntquerysysteminformation#system_processor_performance_information +// additional fields documented here +// https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/ex/sysinfo/processor_performance.htm +type win32_SystemProcessorPerformanceInformation struct { + IdleTime int64 // idle time in 100ns (this is not a filetime). + KernelTime int64 // kernel time in 100ns. kernel time includes idle time. (this is not a filetime). + UserTime int64 // usertime in 100ns (this is not a filetime). + DpcTime int64 // dpc time in 100ns (this is not a filetime). + InterruptTime int64 // interrupt time in 100ns + InterruptCount uint32 +} + +const ( + ClocksPerSec = 10000000.0 + + // systemProcessorPerformanceInformationClass information class to query with NTQuerySystemInformation + // https://processhacker.sourceforge.io/doc/ntexapi_8h.html#ad5d815b48e8f4da1ef2eb7a2f18a54e0 + win32_SystemProcessorPerformanceInformationClass = 8 + + // size of systemProcessorPerformanceInfoSize in memory + win32_SystemProcessorPerformanceInfoSize = uint32(unsafe.Sizeof(win32_SystemProcessorPerformanceInformation{})) +) + +// Times returns times stat per cpu and combined for all CPUs +func Times(percpu bool) ([]TimesStat, error) { + return TimesWithContext(context.Background(), percpu) +} + +func TimesWithContext(ctx context.Context, percpu bool) ([]TimesStat, error) { + if percpu { + return perCPUTimes() + } + + var ret []TimesStat + var lpIdleTime common.FILETIME + var lpKernelTime common.FILETIME + var lpUserTime common.FILETIME + r, _, _ := common.ProcGetSystemTimes.Call( + uintptr(unsafe.Pointer(&lpIdleTime)), + uintptr(unsafe.Pointer(&lpKernelTime)), + uintptr(unsafe.Pointer(&lpUserTime))) + if r == 0 { + return ret, windows.GetLastError() + } + + LOT := float64(0.0000001) + HIT := (LOT * 4294967296.0) + idle := ((HIT * float64(lpIdleTime.DwHighDateTime)) + (LOT * float64(lpIdleTime.DwLowDateTime))) + user := ((HIT * float64(lpUserTime.DwHighDateTime)) + (LOT * float64(lpUserTime.DwLowDateTime))) + kernel := ((HIT * float64(lpKernelTime.DwHighDateTime)) + (LOT * float64(lpKernelTime.DwLowDateTime))) + system := (kernel - idle) + + ret = append(ret, TimesStat{ + CPU: "cpu-total", + Idle: float64(idle), + User: float64(user), + System: float64(system), + }) + return ret, nil +} + +func Info() ([]InfoStat, error) { + return InfoWithContext(context.Background()) +} + +func InfoWithContext(ctx context.Context) ([]InfoStat, error) { + var ret []InfoStat + var dst []win32_Processor + q := wmi.CreateQuery(&dst, "") + if err := common.WMIQueryWithContext(ctx, q, &dst); err != nil { + return ret, err + } + + var procID string + for i, l := range dst { + procID = "" + if l.ProcessorID != nil { + procID = *l.ProcessorID + } + + cpu := InfoStat{ + CPU: int32(i), + Family: fmt.Sprintf("%d", l.Family), + VendorID: l.Manufacturer, + ModelName: l.Name, + Cores: int32(l.NumberOfLogicalProcessors), + PhysicalID: procID, + Mhz: float64(l.MaxClockSpeed), + Flags: 
[]string{}, + } + ret = append(ret, cpu) + } + + return ret, nil +} + +// perCPUTimes returns times stat per cpu, per core and overall for all CPUs +func perCPUTimes() ([]TimesStat, error) { + var ret []TimesStat + stats, err := perfInfo() + if err != nil { + return nil, err + } + for core, v := range stats { + c := TimesStat{ + CPU: fmt.Sprintf("cpu%d", core), + User: float64(v.UserTime) / ClocksPerSec, + System: float64(v.KernelTime-v.IdleTime) / ClocksPerSec, + Idle: float64(v.IdleTime) / ClocksPerSec, + Irq: float64(v.InterruptTime) / ClocksPerSec, + } + ret = append(ret, c) + } + return ret, nil +} + +// makes call to Windows API function to retrieve performance information for each core +func perfInfo() ([]win32_SystemProcessorPerformanceInformation, error) { + // Make maxResults large for safety. + // We can't invoke the api call with a results array that's too small. + // If we have more than 2056 cores on a single host, then it's probably the future. + maxBuffer := 2056 + // buffer for results from the windows proc + resultBuffer := make([]win32_SystemProcessorPerformanceInformation, maxBuffer) + // size of the buffer in memory + bufferSize := uintptr(win32_SystemProcessorPerformanceInfoSize) * uintptr(maxBuffer) + // size of the returned response + var retSize uint32 + + // Invoke windows api proc. + // The returned err from the windows dll proc will always be non-nil even when successful. + // See https://godoc.org/golang.org/x/sys/windows#LazyProc.Call for more information + retCode, _, err := common.ProcNtQuerySystemInformation.Call( + win32_SystemProcessorPerformanceInformationClass, // System Information Class -> SystemProcessorPerformanceInformation + uintptr(unsafe.Pointer(&resultBuffer[0])), // pointer to first element in result buffer + bufferSize, // size of the buffer in memory + uintptr(unsafe.Pointer(&retSize)), // pointer to the size of the returned results the windows proc will set this + ) + + // check return code for errors + if retCode != 0 { + return nil, fmt.Errorf("call to NtQuerySystemInformation returned %d. err: %s", retCode, err.Error()) + } + + // calculate the number of returned elements based on the returned size + numReturnedElements := retSize / win32_SystemProcessorPerformanceInfoSize + + // trim results to the number of returned elements + resultBuffer = resultBuffer[:numReturnedElements] + + return resultBuffer, nil +} + +// SystemInfo is an equivalent representation of SYSTEM_INFO in the Windows API. 
+// https://msdn.microsoft.com/en-us/library/ms724958%28VS.85%29.aspx?f=255&MSPPError=-2147217396 +// https://github.com/elastic/go-windows/blob/bb1581babc04d5cb29a2bfa7a9ac6781c730c8dd/kernel32.go#L43 +type systemInfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +func CountsWithContext(ctx context.Context, logical bool) (int, error) { + if logical { + // https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L97 + ret := windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS) + if ret != 0 { + return int(ret), nil + } + var systemInfo systemInfo + _, _, err := procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) + if systemInfo.dwNumberOfProcessors == 0 { + return 0, err + } + return int(systemInfo.dwNumberOfProcessors), nil + } + // physical cores https://github.com/giampaolo/psutil/blob/d01a9eaa35a8aadf6c519839e987a49d8be2d891/psutil/_psutil_windows.c#L499 + // for the time being, try with unreliable and slow WMI call… + var dst []win32_Processor + q := wmi.CreateQuery(&dst, "") + if err := common.WMIQueryWithContext(ctx, q, &dst); err != nil { + return 0, err + } + var count uint32 + for _, d := range dst { + count += d.NumberOfCores + } + return int(count), nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go new file mode 100644 index 0000000000000..6e75e74b0183d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/binary.go @@ -0,0 +1,638 @@ +// SPDX-License-Identifier: BSD-3-Clause +package common + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package binary implements simple translation between numbers and byte +// sequences and encoding and decoding of varints. +// +// Numbers are translated by reading and writing fixed-size values. +// A fixed-size value is either a fixed-size arithmetic +// type (int8, uint8, int16, float32, complex64, ...) +// or an array or struct containing only fixed-size values. +// +// The varint functions encode and decode single integer values using +// a variable-length encoding; smaller values require fewer bytes. +// For a specification, see +// http://code.google.com/apis/protocolbuffers/docs/encoding.html. +// +// This package favors simplicity over efficiency. Clients that require +// high-performance serialization, especially for large data structures, +// should look at more advanced solutions such as the encoding/gob +// package or protocol buffers. + +import ( + "errors" + "io" + "math" + "reflect" +) + +// A ByteOrder specifies how to convert byte sequences into +// 16-, 32-, or 64-bit unsigned integers. +type ByteOrder interface { + Uint16([]byte) uint16 + Uint32([]byte) uint32 + Uint64([]byte) uint64 + PutUint16([]byte, uint16) + PutUint32([]byte, uint32) + PutUint64([]byte, uint64) + String() string +} + +// LittleEndian is the little-endian implementation of ByteOrder. +var LittleEndian littleEndian + +// BigEndian is the big-endian implementation of ByteOrder. 
+var BigEndian bigEndian + +type littleEndian struct{} + +func (littleEndian) Uint16(b []byte) uint16 { return uint16(b[0]) | uint16(b[1])<<8 } + +func (littleEndian) PutUint16(b []byte, v uint16) { + b[0] = byte(v) + b[1] = byte(v >> 8) +} + +func (littleEndian) Uint32(b []byte) uint32 { + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func (littleEndian) PutUint32(b []byte, v uint32) { + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) +} + +func (littleEndian) Uint64(b []byte) uint64 { + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func (littleEndian) PutUint64(b []byte, v uint64) { + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) + b[4] = byte(v >> 32) + b[5] = byte(v >> 40) + b[6] = byte(v >> 48) + b[7] = byte(v >> 56) +} + +func (littleEndian) String() string { return "LittleEndian" } + +func (littleEndian) GoString() string { return "binary.LittleEndian" } + +type bigEndian struct{} + +func (bigEndian) Uint16(b []byte) uint16 { return uint16(b[1]) | uint16(b[0])<<8 } + +func (bigEndian) PutUint16(b []byte, v uint16) { + b[0] = byte(v >> 8) + b[1] = byte(v) +} + +func (bigEndian) Uint32(b []byte) uint32 { + return uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 +} + +func (bigEndian) PutUint32(b []byte, v uint32) { + b[0] = byte(v >> 24) + b[1] = byte(v >> 16) + b[2] = byte(v >> 8) + b[3] = byte(v) +} + +func (bigEndian) Uint64(b []byte) uint64 { + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +func (bigEndian) PutUint64(b []byte, v uint64) { + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) +} + +func (bigEndian) String() string { return "BigEndian" } + +func (bigEndian) GoString() string { return "binary.BigEndian" } + +// Read reads structured binary data from r into data. +// Data must be a pointer to a fixed-size value or a slice +// of fixed-size values. +// Bytes read from r are decoded using the specified byte order +// and written to successive fields of the data. +// When reading into structs, the field data for fields with +// blank (_) field names is skipped; i.e., blank field names +// may be used for padding. +// When reading into a struct, all non-blank fields must be exported. +func Read(r io.Reader, order ByteOrder, data interface{}) error { + // Fast path for basic types and slices. + if n := intDataSize(data); n != 0 { + var b [8]byte + var bs []byte + if n > len(b) { + bs = make([]byte, n) + } else { + bs = b[:n] + } + if _, err := io.ReadFull(r, bs); err != nil { + return err + } + switch data := data.(type) { + case *int8: + *data = int8(b[0]) + case *uint8: + *data = b[0] + case *int16: + *data = int16(order.Uint16(bs)) + case *uint16: + *data = order.Uint16(bs) + case *int32: + *data = int32(order.Uint32(bs)) + case *uint32: + *data = order.Uint32(bs) + case *int64: + *data = int64(order.Uint64(bs)) + case *uint64: + *data = order.Uint64(bs) + case []int8: + for i, x := range bs { // Easier to loop over the input for 8-bit values. 
+ data[i] = int8(x) + } + case []uint8: + copy(data, bs) + case []int16: + for i := range data { + data[i] = int16(order.Uint16(bs[2*i:])) + } + case []uint16: + for i := range data { + data[i] = order.Uint16(bs[2*i:]) + } + case []int32: + for i := range data { + data[i] = int32(order.Uint32(bs[4*i:])) + } + case []uint32: + for i := range data { + data[i] = order.Uint32(bs[4*i:]) + } + case []int64: + for i := range data { + data[i] = int64(order.Uint64(bs[8*i:])) + } + case []uint64: + for i := range data { + data[i] = order.Uint64(bs[8*i:]) + } + } + return nil + } + + // Fallback to reflect-based decoding. + v := reflect.ValueOf(data) + size := -1 + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + size = dataSize(v) + case reflect.Slice: + size = dataSize(v) + } + if size < 0 { + return errors.New("binary.Read: invalid type " + reflect.TypeOf(data).String()) + } + d := &decoder{order: order, buf: make([]byte, size)} + if _, err := io.ReadFull(r, d.buf); err != nil { + return err + } + d.value(v) + return nil +} + +// Write writes the binary representation of data into w. +// Data must be a fixed-size value or a slice of fixed-size +// values, or a pointer to such data. +// Bytes written to w are encoded using the specified byte order +// and read from successive fields of the data. +// When writing structs, zero values are written for fields +// with blank (_) field names. +func Write(w io.Writer, order ByteOrder, data interface{}) error { + // Fast path for basic types and slices. + if n := intDataSize(data); n != 0 { + var b [8]byte + var bs []byte + if n > len(b) { + bs = make([]byte, n) + } else { + bs = b[:n] + } + switch v := data.(type) { + case *int8: + bs = b[:1] + b[0] = byte(*v) + case int8: + bs = b[:1] + b[0] = byte(v) + case []int8: + for i, x := range v { + bs[i] = byte(x) + } + case *uint8: + bs = b[:1] + b[0] = *v + case uint8: + bs = b[:1] + b[0] = byte(v) + case []uint8: + bs = v + case *int16: + bs = b[:2] + order.PutUint16(bs, uint16(*v)) + case int16: + bs = b[:2] + order.PutUint16(bs, uint16(v)) + case []int16: + for i, x := range v { + order.PutUint16(bs[2*i:], uint16(x)) + } + case *uint16: + bs = b[:2] + order.PutUint16(bs, *v) + case uint16: + bs = b[:2] + order.PutUint16(bs, v) + case []uint16: + for i, x := range v { + order.PutUint16(bs[2*i:], x) + } + case *int32: + bs = b[:4] + order.PutUint32(bs, uint32(*v)) + case int32: + bs = b[:4] + order.PutUint32(bs, uint32(v)) + case []int32: + for i, x := range v { + order.PutUint32(bs[4*i:], uint32(x)) + } + case *uint32: + bs = b[:4] + order.PutUint32(bs, *v) + case uint32: + bs = b[:4] + order.PutUint32(bs, v) + case []uint32: + for i, x := range v { + order.PutUint32(bs[4*i:], x) + } + case *int64: + bs = b[:8] + order.PutUint64(bs, uint64(*v)) + case int64: + bs = b[:8] + order.PutUint64(bs, uint64(v)) + case []int64: + for i, x := range v { + order.PutUint64(bs[8*i:], uint64(x)) + } + case *uint64: + bs = b[:8] + order.PutUint64(bs, *v) + case uint64: + bs = b[:8] + order.PutUint64(bs, v) + case []uint64: + for i, x := range v { + order.PutUint64(bs[8*i:], x) + } + } + _, err := w.Write(bs) + return err + } + + // Fallback to reflect-based encoding. 
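+ // dataSize walks the value via reflection to compute the encoded length,
+ // so the whole buffer can be allocated up front before encoding into it.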
+ v := reflect.Indirect(reflect.ValueOf(data)) + size := dataSize(v) + if size < 0 { + return errors.New("binary.Write: invalid type " + reflect.TypeOf(data).String()) + } + buf := make([]byte, size) + e := &encoder{order: order, buf: buf} + e.value(v) + _, err := w.Write(buf) + return err +} + +// Size returns how many bytes Write would generate to encode the value v, which +// must be a fixed-size value or a slice of fixed-size values, or a pointer to such data. +// If v is neither of these, Size returns -1. +func Size(v interface{}) int { + return dataSize(reflect.Indirect(reflect.ValueOf(v))) +} + +// dataSize returns the number of bytes the actual data represented by v occupies in memory. +// For compound structures, it sums the sizes of the elements. Thus, for instance, for a slice +// it returns the length of the slice times the element size and does not count the memory +// occupied by the header. If the type of v is not acceptable, dataSize returns -1. +func dataSize(v reflect.Value) int { + if v.Kind() == reflect.Slice { + if s := sizeof(v.Type().Elem()); s >= 0 { + return s * v.Len() + } + return -1 + } + return sizeof(v.Type()) +} + +// sizeof returns the size >= 0 of variables for the given type or -1 if the type is not acceptable. +func sizeof(t reflect.Type) int { + switch t.Kind() { + case reflect.Array: + if s := sizeof(t.Elem()); s >= 0 { + return s * t.Len() + } + + case reflect.Struct: + sum := 0 + for i, n := 0, t.NumField(); i < n; i++ { + s := sizeof(t.Field(i).Type) + if s < 0 { + return -1 + } + sum += s + } + return sum + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.Ptr: + return int(t.Size()) + } + + return -1 +} + +type coder struct { + order ByteOrder + buf []byte +} + +type ( + decoder coder + encoder coder +) + +func (d *decoder) uint8() uint8 { + x := d.buf[0] + d.buf = d.buf[1:] + return x +} + +func (e *encoder) uint8(x uint8) { + e.buf[0] = x + e.buf = e.buf[1:] +} + +func (d *decoder) uint16() uint16 { + x := d.order.Uint16(d.buf[0:2]) + d.buf = d.buf[2:] + return x +} + +func (e *encoder) uint16(x uint16) { + e.order.PutUint16(e.buf[0:2], x) + e.buf = e.buf[2:] +} + +func (d *decoder) uint32() uint32 { + x := d.order.Uint32(d.buf[0:4]) + d.buf = d.buf[4:] + return x +} + +func (e *encoder) uint32(x uint32) { + e.order.PutUint32(e.buf[0:4], x) + e.buf = e.buf[4:] +} + +func (d *decoder) uint64() uint64 { + x := d.order.Uint64(d.buf[0:8]) + d.buf = d.buf[8:] + return x +} + +func (e *encoder) uint64(x uint64) { + e.order.PutUint64(e.buf[0:8], x) + e.buf = e.buf[8:] +} + +func (d *decoder) int8() int8 { return int8(d.uint8()) } + +func (e *encoder) int8(x int8) { e.uint8(uint8(x)) } + +func (d *decoder) int16() int16 { return int16(d.uint16()) } + +func (e *encoder) int16(x int16) { e.uint16(uint16(x)) } + +func (d *decoder) int32() int32 { return int32(d.uint32()) } + +func (e *encoder) int32(x int32) { e.uint32(uint32(x)) } + +func (d *decoder) int64() int64 { return int64(d.uint64()) } + +func (e *encoder) int64(x int64) { e.uint64(uint64(x)) } + +func (d *decoder) value(v reflect.Value) { + switch v.Kind() { + case reflect.Array: + l := v.Len() + for i := 0; i < l; i++ { + d.value(v.Index(i)) + } + + case reflect.Struct: + t := v.Type() + l := v.NumField() + for i := 0; i < l; i++ { + // Note: Calling v.CanSet() below is an optimization. 
+ // It would be sufficient to check the field name, + // but creating the StructField info for each field is + // costly (run "go test -bench=ReadStruct" and compare + // results when making changes to this code). + if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + d.value(v) + } else { + d.skip(v) + } + } + + case reflect.Slice: + l := v.Len() + for i := 0; i < l; i++ { + d.value(v.Index(i)) + } + + case reflect.Int8: + v.SetInt(int64(d.int8())) + case reflect.Int16: + v.SetInt(int64(d.int16())) + case reflect.Int32: + v.SetInt(int64(d.int32())) + case reflect.Int64: + v.SetInt(d.int64()) + + case reflect.Uint8: + v.SetUint(uint64(d.uint8())) + case reflect.Uint16: + v.SetUint(uint64(d.uint16())) + case reflect.Uint32: + v.SetUint(uint64(d.uint32())) + case reflect.Uint64: + v.SetUint(d.uint64()) + + case reflect.Float32: + v.SetFloat(float64(math.Float32frombits(d.uint32()))) + case reflect.Float64: + v.SetFloat(math.Float64frombits(d.uint64())) + + case reflect.Complex64: + v.SetComplex(complex( + float64(math.Float32frombits(d.uint32())), + float64(math.Float32frombits(d.uint32())), + )) + case reflect.Complex128: + v.SetComplex(complex( + math.Float64frombits(d.uint64()), + math.Float64frombits(d.uint64()), + )) + } +} + +func (e *encoder) value(v reflect.Value) { + switch v.Kind() { + case reflect.Array: + l := v.Len() + for i := 0; i < l; i++ { + e.value(v.Index(i)) + } + + case reflect.Struct: + t := v.Type() + l := v.NumField() + for i := 0; i < l; i++ { + // see comment for corresponding code in decoder.value() + if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + e.value(v) + } else { + e.skip(v) + } + } + + case reflect.Slice: + l := v.Len() + for i := 0; i < l; i++ { + e.value(v.Index(i)) + } + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch v.Type().Kind() { + case reflect.Int8: + e.int8(int8(v.Int())) + case reflect.Int16: + e.int16(int16(v.Int())) + case reflect.Int32: + e.int32(int32(v.Int())) + case reflect.Int64: + e.int64(v.Int()) + } + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch v.Type().Kind() { + case reflect.Uint8: + e.uint8(uint8(v.Uint())) + case reflect.Uint16: + e.uint16(uint16(v.Uint())) + case reflect.Uint32: + e.uint32(uint32(v.Uint())) + case reflect.Uint64: + e.uint64(v.Uint()) + } + + case reflect.Float32, reflect.Float64: + switch v.Type().Kind() { + case reflect.Float32: + e.uint32(math.Float32bits(float32(v.Float()))) + case reflect.Float64: + e.uint64(math.Float64bits(v.Float())) + } + + case reflect.Complex64, reflect.Complex128: + switch v.Type().Kind() { + case reflect.Complex64: + x := v.Complex() + e.uint32(math.Float32bits(float32(real(x)))) + e.uint32(math.Float32bits(float32(imag(x)))) + case reflect.Complex128: + x := v.Complex() + e.uint64(math.Float64bits(real(x))) + e.uint64(math.Float64bits(imag(x))) + } + } +} + +func (d *decoder) skip(v reflect.Value) { + d.buf = d.buf[dataSize(v):] +} + +func (e *encoder) skip(v reflect.Value) { + n := dataSize(v) + for i := range e.buf[0:n] { + e.buf[i] = 0 + } + e.buf = e.buf[n:] +} + +// intDataSize returns the size of the data required to represent the data when encoded. +// It returns zero if the type cannot be implemented by the fast path in Read or Write. 
+func intDataSize(data interface{}) int { + switch data := data.(type) { + case int8, *int8, *uint8: + return 1 + case []int8: + return len(data) + case []uint8: + return len(data) + case int16, *int16, *uint16: + return 2 + case []int16: + return 2 * len(data) + case []uint16: + return 2 * len(data) + case int32, *int32, *uint32: + return 4 + case []int32: + return 4 * len(data) + case []uint32: + return 4 * len(data) + case int64, *int64, *uint64: + return 8 + case []int64: + return 8 * len(data) + case []uint64: + return 8 * len(data) + } + return 0 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go new file mode 100644 index 0000000000000..642aabc558321 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common.go @@ -0,0 +1,465 @@ +// SPDX-License-Identifier: BSD-3-Clause +package common + +// +// gopsutil is a port of psutil(http://pythonhosted.org/psutil/). +// This covers these architectures. +// - linux (amd64, arm) +// - freebsd (amd64) +// - windows (amd64) + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/shirou/gopsutil/v4/common" +) + +var ( + Timeout = 3 * time.Second + ErrTimeout = errors.New("command timed out") +) + +type Invoker interface { + Command(string, ...string) ([]byte, error) + CommandWithContext(context.Context, string, ...string) ([]byte, error) +} + +type Invoke struct{} + +func (i Invoke) Command(name string, arg ...string) ([]byte, error) { + ctx, cancel := context.WithTimeout(context.Background(), Timeout) + defer cancel() + return i.CommandWithContext(ctx, name, arg...) +} + +func (i Invoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { + cmd := exec.CommandContext(ctx, name, arg...) + + var buf bytes.Buffer + cmd.Stdout = &buf + cmd.Stderr = &buf + + if err := cmd.Start(); err != nil { + return buf.Bytes(), err + } + + if err := cmd.Wait(); err != nil { + return buf.Bytes(), err + } + + return buf.Bytes(), nil +} + +type FakeInvoke struct { + Suffix string // Suffix species expected file name suffix such as "fail" + Error error // If Error specified, return the error. +} + +// Command in FakeInvoke returns from expected file if exists. +func (i FakeInvoke) Command(name string, arg ...string) ([]byte, error) { + if i.Error != nil { + return []byte{}, i.Error + } + + arch := runtime.GOOS + + commandName := filepath.Base(name) + + fname := strings.Join(append([]string{commandName}, arg...), "") + fname = url.QueryEscape(fname) + fpath := path.Join("testdata", arch, fname) + if i.Suffix != "" { + fpath += "_" + i.Suffix + } + if PathExists(fpath) { + return os.ReadFile(fpath) + } + return []byte{}, fmt.Errorf("could not find testdata: %s", fpath) +} + +func (i FakeInvoke) CommandWithContext(ctx context.Context, name string, arg ...string) ([]byte, error) { + return i.Command(name, arg...) +} + +var ErrNotImplementedError = errors.New("not implemented yet") + +// ReadFile reads contents from a file +func ReadFile(filename string) (string, error) { + content, err := os.ReadFile(filename) + if err != nil { + return "", err + } + + return string(content), nil +} + +// ReadLines reads contents from a file and splits them by new lines. +// A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). 
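+// For illustration ("/proc/meminfo" is just an example path):
+//
+//	lines, _ := ReadLines("/proc/meminfo")               // whole file
+//	third, _ := ReadLinesOffsetN("/proc/meminfo", 2, 1)  // third line only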
+func ReadLines(filename string) ([]string, error) { + return ReadLinesOffsetN(filename, 0, -1) +} + +// ReadLine reads a file and returns the first occurrence of a line that is prefixed with prefix. +func ReadLine(filename string, prefix string) (string, error) { + f, err := os.Open(filename) + if err != nil { + return "", err + } + defer f.Close() + r := bufio.NewReader(f) + for { + line, err := r.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return "", err + } + if strings.HasPrefix(line, prefix) { + return line, nil + } + } + + return "", nil +} + +// ReadLinesOffsetN reads contents from file and splits them by new line. +// The offset tells at which line number to start. +// The count determines the number of lines to read (starting from offset): +// n >= 0: at most n lines +// n < 0: whole file +func ReadLinesOffsetN(filename string, offset uint, n int) ([]string, error) { + f, err := os.Open(filename) + if err != nil { + return []string{""}, err + } + defer f.Close() + + var ret []string + + r := bufio.NewReader(f) + for i := 0; i < n+int(offset) || n < 0; i++ { + line, err := r.ReadString('\n') + if err != nil { + if err == io.EOF && len(line) > 0 { + ret = append(ret, strings.Trim(line, "\n")) + } + break + } + if i < int(offset) { + continue + } + ret = append(ret, strings.Trim(line, "\n")) + } + + return ret, nil +} + +func IntToString(orig []int8) string { + ret := make([]byte, len(orig)) + size := -1 + for i, o := range orig { + if o == 0 { + size = i + break + } + ret[i] = byte(o) + } + if size == -1 { + size = len(orig) + } + + return string(ret[0:size]) +} + +func UintToString(orig []uint8) string { + ret := make([]byte, len(orig)) + size := -1 + for i, o := range orig { + if o == 0 { + size = i + break + } + ret[i] = byte(o) + } + if size == -1 { + size = len(orig) + } + + return string(ret[0:size]) +} + +func ByteToString(orig []byte) string { + n := -1 + l := -1 + for i, b := range orig { + // skip left side null + if l == -1 && b == 0 { + continue + } + if l == -1 { + l = i + } + + if b == 0 { + break + } + n = i + 1 + } + if n == -1 { + return string(orig) + } + return string(orig[l:n]) +} + +// ReadInts reads contents from single line file and returns them as []int32. +func ReadInts(filename string) ([]int64, error) { + f, err := os.Open(filename) + if err != nil { + return []int64{}, err + } + defer f.Close() + + var ret []int64 + + r := bufio.NewReader(f) + + // The int files that this is concerned with should only be one liners. 
+ line, err := r.ReadString('\n') + if err != nil { + return []int64{}, err + } + + i, err := strconv.ParseInt(strings.Trim(line, "\n"), 10, 32) + if err != nil { + return []int64{}, err + } + ret = append(ret, i) + + return ret, nil +} + +// Parse Hex to uint32 without error +func HexToUint32(hex string) uint32 { + vv, _ := strconv.ParseUint(hex, 16, 32) + return uint32(vv) +} + +// Parse to int32 without error +func mustParseInt32(val string) int32 { + vv, _ := strconv.ParseInt(val, 10, 32) + return int32(vv) +} + +// Parse to uint64 without error +func mustParseUint64(val string) uint64 { + vv, _ := strconv.ParseInt(val, 10, 64) + return uint64(vv) +} + +// Parse to Float64 without error +func mustParseFloat64(val string) float64 { + vv, _ := strconv.ParseFloat(val, 64) + return vv +} + +// StringsHas checks the target string slice contains src or not +func StringsHas(target []string, src string) bool { + for _, t := range target { + if strings.TrimSpace(t) == src { + return true + } + } + return false +} + +// StringsContains checks the src in any string of the target string slice +func StringsContains(target []string, src string) bool { + for _, t := range target { + if strings.Contains(t, src) { + return true + } + } + return false +} + +// IntContains checks the src in any int of the target int slice. +func IntContains(target []int, src int) bool { + for _, t := range target { + if src == t { + return true + } + } + return false +} + +// get struct attributes. +// This method is used only for debugging platform dependent code. +func attributes(m interface{}) map[string]reflect.Type { + typ := reflect.TypeOf(m) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + attrs := make(map[string]reflect.Type) + if typ.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < typ.NumField(); i++ { + p := typ.Field(i) + if !p.Anonymous { + attrs[p.Name] = p.Type + } + } + + return attrs +} + +func PathExists(filename string) bool { + if _, err := os.Stat(filename); err == nil { + return true + } + return false +} + +// PathExistsWithContents returns the filename exists and it is not empty +func PathExistsWithContents(filename string) bool { + info, err := os.Stat(filename) + if err != nil { + return false + } + return info.Size() > 4 && !info.IsDir() // at least 4 bytes +} + +// GetEnvWithContext retrieves the environment variable key. If it does not exist it returns the default. +// The context may optionally contain a map superseding os.EnvKey. +func GetEnvWithContext(ctx context.Context, key string, dfault string, combineWith ...string) string { + var value string + if env, ok := ctx.Value(common.EnvKey).(common.EnvMap); ok { + value = env[common.EnvKeyType(key)] + } + if value == "" { + value = os.Getenv(key) + } + if value == "" { + value = dfault + } + + return combine(value, combineWith) +} + +// GetEnv retrieves the environment variable key. If it does not exist it returns the default. +func GetEnv(key string, dfault string, combineWith ...string) string { + value := os.Getenv(key) + if value == "" { + value = dfault + } + + return combine(value, combineWith) +} + +func combine(value string, combineWith []string) string { + switch len(combineWith) { + case 0: + return value + case 1: + return filepath.Join(value, combineWith[0]) + default: + all := make([]string, len(combineWith)+1) + all[0] = value + copy(all[1:], combineWith) + return filepath.Join(all...) + } +} + +func HostProc(combineWith ...string) string { + return GetEnv("HOST_PROC", "/proc", combineWith...) 
+} + +func HostSys(combineWith ...string) string { + return GetEnv("HOST_SYS", "/sys", combineWith...) +} + +func HostEtc(combineWith ...string) string { + return GetEnv("HOST_ETC", "/etc", combineWith...) +} + +func HostVar(combineWith ...string) string { + return GetEnv("HOST_VAR", "/var", combineWith...) +} + +func HostRun(combineWith ...string) string { + return GetEnv("HOST_RUN", "/run", combineWith...) +} + +func HostDev(combineWith ...string) string { + return GetEnv("HOST_DEV", "/dev", combineWith...) +} + +func HostRoot(combineWith ...string) string { + return GetEnv("HOST_ROOT", "/", combineWith...) +} + +func HostProcWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_PROC", "/proc", combineWith...) +} + +func HostProcMountInfoWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_PROC_MOUNTINFO", "", combineWith...) +} + +func HostSysWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_SYS", "/sys", combineWith...) +} + +func HostEtcWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_ETC", "/etc", combineWith...) +} + +func HostVarWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_VAR", "/var", combineWith...) +} + +func HostRunWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_RUN", "/run", combineWith...) +} + +func HostDevWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_DEV", "/dev", combineWith...) +} + +func HostRootWithContext(ctx context.Context, combineWith ...string) string { + return GetEnvWithContext(ctx, "HOST_ROOT", "/", combineWith...) +} + +// getSysctrlEnv sets LC_ALL=C in a list of env vars for use when running +// sysctl commands (see DoSysctrl). 
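+// For illustration: given []string{"PATH=/usr/bin", "LC_ALL=en_US.UTF-8"} it
+// returns the slice with the second entry rewritten to "LC_ALL=C"; when no
+// LC_ALL entry is present, "LC_ALL=C" is appended instead.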
+func getSysctrlEnv(env []string) []string { + foundLC := false + for i, line := range env { + if strings.HasPrefix(line, "LC_ALL") { + env[i] = "LC_ALL=C" + foundLC = true + } + } + if !foundLC { + env = append(env, "LC_ALL=C") + } + return env +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go new file mode 100644 index 0000000000000..53f9ae8d9a5a4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_darwin.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package common + +import ( + "context" + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func DoSysctrlWithContext(ctx context.Context, mib string) ([]string, error) { + cmd := exec.CommandContext(ctx, "sysctl", "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + 202, // unix.SYS___SYSCTL https://github.com/golang/sys/blob/76b94024e4b621e672466e8db3d7f084e7ddcad2/unix/zsysnum_darwin_amd64.go#L146 + uintptr(unsafe.Pointer(&mib[0])), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go new file mode 100644 index 0000000000000..53cdceeb6d41c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_freebsd.go @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build freebsd || openbsd + +package common + +import ( + "fmt" + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func SysctlUint(mib string) (uint64, error) { + buf, err := unix.SysctlRaw(mib) + if err != nil { + return 0, err + } + if len(buf) == 8 { // 64 bit + return *(*uint64)(unsafe.Pointer(&buf[0])), nil + } + if len(buf) == 4 { // 32bit + t := *(*uint32)(unsafe.Pointer(&buf[0])) + return uint64(t), nil + } + return 0, fmt.Errorf("unexpected size: %s, %d", mib, len(buf)) +} + +func DoSysctrl(mib string) ([]string, error) { + cmd := exec.Command("sysctl", "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + 
_, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go new file mode 100644 index 0000000000000..85802dcb097ab --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_linux.go @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux + +package common + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" +) + +// cachedBootTime must be accessed via atomic.Load/StoreUint64 +var cachedBootTime uint64 + +func DoSysctrl(mib string) ([]string, error) { + cmd := exec.Command("sysctl", "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func NumProcs() (uint64, error) { + return NumProcsWithContext(context.Background()) +} + +func NumProcsWithContext(ctx context.Context) (uint64, error) { + f, err := os.Open(HostProcWithContext(ctx)) + if err != nil { + return 0, err + } + defer f.Close() + + list, err := f.Readdirnames(-1) + if err != nil { + return 0, err + } + var cnt uint64 + + for _, v := range list { + if _, err = strconv.ParseUint(v, 10, 64); err == nil { + cnt++ + } + } + + return cnt, nil +} + +func BootTimeWithContext(ctx context.Context, enableCache bool) (uint64, error) { + if enableCache { + t := atomic.LoadUint64(&cachedBootTime) + if t != 0 { + return t, nil + } + } + + system, role, err := VirtualizationWithContext(ctx) + if err != nil { + return 0, err + } + + useStatFile := true + if system == "lxc" && role == "guest" { + // if lxc, /proc/uptime is used. 
+ useStatFile = false + } else if system == "docker" && role == "guest" { + // also docker, guest + useStatFile = false + } + + if useStatFile { + t, err := readBootTimeStat(ctx) + if err != nil { + return 0, err + } + if enableCache { + atomic.StoreUint64(&cachedBootTime, t) + } + } + + filename := HostProcWithContext(ctx, "uptime") + lines, err := ReadLines(filename) + if err != nil { + return handleBootTimeFileReadErr(err) + } + if len(lines) != 1 { + return 0, fmt.Errorf("wrong uptime format") + } + f := strings.Fields(lines[0]) + b, err := strconv.ParseFloat(f[0], 64) + if err != nil { + return 0, err + } + currentTime := float64(time.Now().UnixNano()) / float64(time.Second) + t := currentTime - b + + if enableCache { + atomic.StoreUint64(&cachedBootTime, uint64(t)) + } + + return uint64(t), nil +} + +func handleBootTimeFileReadErr(err error) (uint64, error) { + if os.IsPermission(err) { + var info syscall.Sysinfo_t + err := syscall.Sysinfo(&info) + if err != nil { + return 0, err + } + + currentTime := time.Now().UnixNano() / int64(time.Second) + t := currentTime - int64(info.Uptime) + return uint64(t), nil + } + return 0, err +} + +func readBootTimeStat(ctx context.Context) (uint64, error) { + filename := HostProcWithContext(ctx, "stat") + line, err := ReadLine(filename, "btime") + if err != nil { + return handleBootTimeFileReadErr(err) + } + if strings.HasPrefix(line, "btime") { + f := strings.Fields(line) + if len(f) != 2 { + return 0, fmt.Errorf("wrong btime format") + } + b, err := strconv.ParseInt(f[1], 10, 64) + if err != nil { + return 0, err + } + t := uint64(b) + return t, nil + } + return 0, fmt.Errorf("could not find btime") +} + +func Virtualization() (string, string, error) { + return VirtualizationWithContext(context.Background()) +} + +// required variables for concurrency safe virtualization caching +var ( + cachedVirtMap map[string]string + cachedVirtMutex sync.RWMutex + cachedVirtOnce sync.Once +) + +func VirtualizationWithContext(ctx context.Context) (string, string, error) { + var system, role string + + // if cached already, return from cache + cachedVirtMutex.RLock() // unlock won't be deferred so concurrent reads don't wait for long + if cachedVirtMap != nil { + cachedSystem, cachedRole := cachedVirtMap["system"], cachedVirtMap["role"] + cachedVirtMutex.RUnlock() + return cachedSystem, cachedRole, nil + } + cachedVirtMutex.RUnlock() + + filename := HostProcWithContext(ctx, "xen") + if PathExists(filename) { + system = "xen" + role = "guest" // assume guest + + if PathExists(filepath.Join(filename, "capabilities")) { + contents, err := ReadLines(filepath.Join(filename, "capabilities")) + if err == nil { + if StringsContains(contents, "control_d") { + role = "host" + } + } + } + } + + filename = HostProcWithContext(ctx, "modules") + if PathExists(filename) { + contents, err := ReadLines(filename) + if err == nil { + if StringsContains(contents, "kvm") { + system = "kvm" + role = "host" + } else if StringsContains(contents, "hv_util") { + system = "hyperv" + role = "guest" + } else if StringsContains(contents, "vboxdrv") { + system = "vbox" + role = "host" + } else if StringsContains(contents, "vboxguest") { + system = "vbox" + role = "guest" + } else if StringsContains(contents, "vmware") { + system = "vmware" + role = "guest" + } + } + } + + filename = HostProcWithContext(ctx, "cpuinfo") + if PathExists(filename) { + contents, err := ReadLines(filename) + if err == nil { + if StringsContains(contents, "QEMU Virtual CPU") || + StringsContains(contents, "Common 
KVM processor") || + StringsContains(contents, "Common 32-bit KVM processor") { + system = "kvm" + role = "guest" + } + } + } + + filename = HostProcWithContext(ctx, "bus/pci/devices") + if PathExists(filename) { + contents, err := ReadLines(filename) + if err == nil { + if StringsContains(contents, "virtio-pci") { + role = "guest" + } + } + } + + filename = HostProcWithContext(ctx) + if PathExists(filepath.Join(filename, "bc", "0")) { + system = "openvz" + role = "host" + } else if PathExists(filepath.Join(filename, "vz")) { + system = "openvz" + role = "guest" + } + + // not use dmidecode because it requires root + if PathExists(filepath.Join(filename, "self", "status")) { + contents, err := ReadLines(filepath.Join(filename, "self", "status")) + if err == nil { + if StringsContains(contents, "s_context:") || + StringsContains(contents, "VxID:") { + system = "linux-vserver" + } + // TODO: guest or host + } + } + + if PathExists(filepath.Join(filename, "1", "environ")) { + contents, err := ReadFile(filepath.Join(filename, "1", "environ")) + + if err == nil { + if strings.Contains(contents, "container=lxc") { + system = "lxc" + role = "guest" + } + } + } + + if PathExists(filepath.Join(filename, "self", "cgroup")) { + contents, err := ReadLines(filepath.Join(filename, "self", "cgroup")) + if err == nil { + if StringsContains(contents, "lxc") { + system = "lxc" + role = "guest" + } else if StringsContains(contents, "docker") { + system = "docker" + role = "guest" + } else if StringsContains(contents, "machine-rkt") { + system = "rkt" + role = "guest" + } else if PathExists("/usr/bin/lxc-version") { + system = "lxc" + role = "host" + } + } + } + + if PathExists(HostEtcWithContext(ctx, "os-release")) { + p, _, err := GetOSReleaseWithContext(ctx) + if err == nil && p == "coreos" { + system = "rkt" // Is it true? 
+ role = "host" + } + } + + if PathExists(HostRootWithContext(ctx, ".dockerenv")) { + system = "docker" + role = "guest" + } + + // before returning for the first time, cache the system and role + cachedVirtOnce.Do(func() { + cachedVirtMutex.Lock() + defer cachedVirtMutex.Unlock() + cachedVirtMap = map[string]string{ + "system": system, + "role": role, + } + }) + + return system, role, nil +} + +func GetOSRelease() (platform string, version string, err error) { + return GetOSReleaseWithContext(context.Background()) +} + +func GetOSReleaseWithContext(ctx context.Context) (platform string, version string, err error) { + contents, err := ReadLines(HostEtcWithContext(ctx, "os-release")) + if err != nil { + return "", "", nil // return empty + } + for _, line := range contents { + field := strings.Split(line, "=") + if len(field) < 2 { + continue + } + switch field[0] { + case "ID": // use ID for lowercase + platform = trimQuotes(field[1]) + case "VERSION_ID": + version = trimQuotes(field[1]) + } + } + + // cleanup amazon ID + if platform == "amzn" { + platform = "amazon" + } + + return platform, version, nil +} + +// Remove quotes of the source string +func trimQuotes(s string) string { + if len(s) >= 2 { + if s[0] == '"' && s[len(s)-1] == '"' { + return s[1 : len(s)-1] + } + } + return s +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go new file mode 100644 index 0000000000000..206532126c98b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_netbsd.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build netbsd + +package common + +import ( + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func DoSysctrl(mib string) ([]string, error) { + cmd := exec.Command("sysctl", "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err != nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go new file mode 100644 index 0000000000000..00fa19a2fb497 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_openbsd.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd + +package common + +import ( + "os" + "os/exec" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func DoSysctrl(mib string) ([]string, error) { + cmd := exec.Command("sysctl", "-n", mib) + cmd.Env = getSysctrlEnv(os.Environ()) + out, err := cmd.Output() + if err 
!= nil { + return []string{}, err + } + v := strings.Replace(string(out), "{ ", "", 1) + v = strings.Replace(string(v), " }", "", 1) + values := strings.Fields(string(v)) + + return values, nil +} + +func CallSyscall(mib []int32) ([]byte, uint64, error) { + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + + // get required buffer size + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + var b []byte + return b, length, err + } + if length == 0 { + var b []byte + return b, length, err + } + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go new file mode 100644 index 0000000000000..2715b890bef70 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_unix.go @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux || freebsd || darwin || openbsd + +package common + +import ( + "context" + "errors" + "os/exec" + "strconv" + "strings" +) + +func CallLsofWithContext(ctx context.Context, invoke Invoker, pid int32, args ...string) ([]string, error) { + var cmd []string + if pid == 0 { // will get from all processes. + cmd = []string{"-a", "-n", "-P"} + } else { + cmd = []string{"-a", "-n", "-P", "-p", strconv.Itoa(int(pid))} + } + cmd = append(cmd, args...) + out, err := invoke.CommandWithContext(ctx, "lsof", cmd...) + if err != nil { + if errors.Is(err, exec.ErrNotFound) { + return []string{}, err + } + // if no pid found, lsof returns code 1. 
+ if err.Error() == "exit status 1" && len(out) == 0 { + return []string{}, nil + } + } + lines := strings.Split(string(out), "\n") + + var ret []string + for _, l := range lines[1:] { + if len(l) == 0 { + continue + } + ret = append(ret, l) + } + return ret, nil +} + +func CallPgrepWithContext(ctx context.Context, invoke Invoker, pid int32) ([]int32, error) { + out, err := invoke.CommandWithContext(ctx, "pgrep", "-P", strconv.Itoa(int(pid))) + if err != nil { + return []int32{}, err + } + lines := strings.Split(string(out), "\n") + ret := make([]int32, 0, len(lines)) + for _, l := range lines { + if len(l) == 0 { + continue + } + i, err := strconv.ParseInt(l, 10, 32) + if err != nil { + continue + } + ret = append(ret, int32(i)) + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go new file mode 100644 index 0000000000000..766ed2fcba3be --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/common_windows.go @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package common + +import ( + "context" + "fmt" + "path/filepath" + "reflect" + "strings" + "syscall" + "unsafe" + + "github.com/yusufpapurcu/wmi" + "golang.org/x/sys/windows" +) + +// for double values +type PDH_FMT_COUNTERVALUE_DOUBLE struct { + CStatus uint32 + DoubleValue float64 +} + +// for 64 bit integer values +type PDH_FMT_COUNTERVALUE_LARGE struct { + CStatus uint32 + LargeValue int64 +} + +// for long values +type PDH_FMT_COUNTERVALUE_LONG struct { + CStatus uint32 + LongValue int32 + padding [4]byte +} + +// windows system const +const ( + ERROR_SUCCESS = 0 + ERROR_FILE_NOT_FOUND = 2 + DRIVE_REMOVABLE = 2 + DRIVE_FIXED = 3 + HKEY_LOCAL_MACHINE = 0x80000002 + RRF_RT_REG_SZ = 0x00000002 + RRF_RT_REG_DWORD = 0x00000010 + PDH_FMT_LONG = 0x00000100 + PDH_FMT_DOUBLE = 0x00000200 + PDH_FMT_LARGE = 0x00000400 + PDH_INVALID_DATA = 0xc0000bc6 + PDH_INVALID_HANDLE = 0xC0000bbc + PDH_NO_DATA = 0x800007d5 + + STATUS_BUFFER_OVERFLOW = 0x80000005 + STATUS_BUFFER_TOO_SMALL = 0xC0000023 + STATUS_INFO_LENGTH_MISMATCH = 0xC0000004 +) + +const ( + ProcessBasicInformation = 0 + ProcessWow64Information = 26 + ProcessQueryInformation = windows.PROCESS_DUP_HANDLE | windows.PROCESS_QUERY_INFORMATION + + SystemExtendedHandleInformationClass = 64 +) + +var ( + Modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + ModNt = windows.NewLazySystemDLL("ntdll.dll") + ModPdh = windows.NewLazySystemDLL("pdh.dll") + ModPsapi = windows.NewLazySystemDLL("psapi.dll") + + ProcGetSystemTimes = Modkernel32.NewProc("GetSystemTimes") + ProcNtQuerySystemInformation = ModNt.NewProc("NtQuerySystemInformation") + ProcRtlGetNativeSystemInformation = ModNt.NewProc("RtlGetNativeSystemInformation") + ProcRtlNtStatusToDosError = ModNt.NewProc("RtlNtStatusToDosError") + ProcNtQueryInformationProcess = ModNt.NewProc("NtQueryInformationProcess") + ProcNtReadVirtualMemory = ModNt.NewProc("NtReadVirtualMemory") + ProcNtWow64QueryInformationProcess64 = ModNt.NewProc("NtWow64QueryInformationProcess64") + ProcNtWow64ReadVirtualMemory64 = ModNt.NewProc("NtWow64ReadVirtualMemory64") + + PdhOpenQuery = ModPdh.NewProc("PdhOpenQuery") + PdhAddEnglishCounterW = ModPdh.NewProc("PdhAddEnglishCounterW") + PdhCollectQueryData = ModPdh.NewProc("PdhCollectQueryData") + PdhGetFormattedCounterValue = ModPdh.NewProc("PdhGetFormattedCounterValue") + PdhCloseQuery = ModPdh.NewProc("PdhCloseQuery") + + procQueryDosDeviceW = 
Modkernel32.NewProc("QueryDosDeviceW") +) + +type FILETIME struct { + DwLowDateTime uint32 + DwHighDateTime uint32 +} + +// borrowed from net/interface_windows.go +func BytePtrToString(p *uint8) string { + a := (*[10000]uint8)(unsafe.Pointer(p)) + i := 0 + for a[i] != 0 { + i++ + } + return string(a[:i]) +} + +// CounterInfo struct is used to track a windows performance counter +// copied from https://github.com/mackerelio/mackerel-agent/ +type CounterInfo struct { + PostName string + CounterName string + Counter windows.Handle +} + +// CreateQuery with a PdhOpenQuery call +// copied from https://github.com/mackerelio/mackerel-agent/ +func CreateQuery() (windows.Handle, error) { + var query windows.Handle + r, _, err := PdhOpenQuery.Call(0, 0, uintptr(unsafe.Pointer(&query))) + if r != 0 { + return 0, err + } + return query, nil +} + +// CreateCounter with a PdhAddEnglishCounterW call +func CreateCounter(query windows.Handle, pname, cname string) (*CounterInfo, error) { + var counter windows.Handle + r, _, err := PdhAddEnglishCounterW.Call( + uintptr(query), + uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(cname))), + 0, + uintptr(unsafe.Pointer(&counter))) + if r != 0 { + return nil, err + } + return &CounterInfo{ + PostName: pname, + CounterName: cname, + Counter: counter, + }, nil +} + +// GetCounterValue get counter value from handle +// adapted from https://github.com/mackerelio/mackerel-agent/ +func GetCounterValue(counter windows.Handle) (float64, error) { + var value PDH_FMT_COUNTERVALUE_DOUBLE + r, _, err := PdhGetFormattedCounterValue.Call(uintptr(counter), PDH_FMT_DOUBLE, uintptr(0), uintptr(unsafe.Pointer(&value))) + if r != 0 && r != PDH_INVALID_DATA { + return 0.0, err + } + return value.DoubleValue, nil +} + +type Win32PerformanceCounter struct { + PostName string + CounterName string + Query windows.Handle + Counter windows.Handle +} + +func NewWin32PerformanceCounter(postName, counterName string) (*Win32PerformanceCounter, error) { + query, err := CreateQuery() + if err != nil { + return nil, err + } + counter := Win32PerformanceCounter{ + Query: query, + PostName: postName, + CounterName: counterName, + } + r, _, err := PdhAddEnglishCounterW.Call( + uintptr(counter.Query), + uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(counter.CounterName))), + 0, + uintptr(unsafe.Pointer(&counter.Counter)), + ) + if r != 0 { + return nil, err + } + return &counter, nil +} + +func (w *Win32PerformanceCounter) GetValue() (float64, error) { + r, _, err := PdhCollectQueryData.Call(uintptr(w.Query)) + if r != 0 && err != nil { + if r == PDH_NO_DATA { + return 0.0, fmt.Errorf("%w: this counter has not data", err) + } + return 0.0, err + } + + return GetCounterValue(w.Counter) +} + +func ProcessorQueueLengthCounter() (*Win32PerformanceCounter, error) { + return NewWin32PerformanceCounter("processor_queue_length", `\System\Processor Queue Length`) +} + +// WMIQueryWithContext - wraps wmi.Query with a timed-out context to avoid hanging +func WMIQueryWithContext(ctx context.Context, query string, dst interface{}, connectServerArgs ...interface{}) error { + if _, ok := ctx.Deadline(); !ok { + ctxTimeout, cancel := context.WithTimeout(ctx, Timeout) + defer cancel() + ctx = ctxTimeout + } + + errChan := make(chan error, 1) + go func() { + errChan <- wmi.Query(query, dst, connectServerArgs...) 
+ }() + + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-errChan: + return err + } +} + +// Convert paths using native DOS format like: +// +// "\Device\HarddiskVolume1\Windows\systemew\file.txt" +// +// into: +// +// "C:\Windows\systemew\file.txt" +func ConvertDOSPath(p string) string { + rawDrive := strings.Join(strings.Split(p, `\`)[:3], `\`) + + for d := 'A'; d <= 'Z'; d++ { + szDeviceName := string(d) + ":" + szTarget := make([]uint16, 512) + ret, _, _ := procQueryDosDeviceW.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(szDeviceName))), + uintptr(unsafe.Pointer(&szTarget[0])), + uintptr(len(szTarget))) + if ret != 0 && windows.UTF16ToString(szTarget[:]) == rawDrive { + return filepath.Join(szDeviceName, p[len(rawDrive):]) + } + } + return p +} + +type NtStatus uint32 + +func (s NtStatus) Error() error { + if s == 0 { + return nil + } + return fmt.Errorf("NtStatus 0x%08x", uint32(s)) +} + +func (s NtStatus) IsError() bool { + return s>>30 == 3 +} + +type SystemExtendedHandleTableEntryInformation struct { + Object uintptr + UniqueProcessId uintptr + HandleValue uintptr + GrantedAccess uint32 + CreatorBackTraceIndex uint16 + ObjectTypeIndex uint16 + HandleAttributes uint32 + Reserved uint32 +} + +type SystemExtendedHandleInformation struct { + NumberOfHandles uintptr + Reserved uintptr + Handles [1]SystemExtendedHandleTableEntryInformation +} + +// CallWithExpandingBuffer https://github.com/hillu/go-ntdll +func CallWithExpandingBuffer(fn func() NtStatus, buf *[]byte, resultLength *uint32) NtStatus { + for { + if st := fn(); st == STATUS_BUFFER_OVERFLOW || st == STATUS_BUFFER_TOO_SMALL || st == STATUS_INFO_LENGTH_MISMATCH { + if int(*resultLength) <= cap(*buf) { + (*reflect.SliceHeader)(unsafe.Pointer(buf)).Len = int(*resultLength) + } else { + *buf = make([]byte, int(*resultLength)) + } + continue + } else { + if !st.IsError() { + *buf = (*buf)[:int(*resultLength)] + } + return st + } + } +} + +func NtQuerySystemInformation( + SystemInformationClass uint32, + SystemInformation *byte, + SystemInformationLength uint32, + ReturnLength *uint32, +) NtStatus { + r0, _, _ := ProcNtQuerySystemInformation.Call( + uintptr(SystemInformationClass), + uintptr(unsafe.Pointer(SystemInformation)), + uintptr(SystemInformationLength), + uintptr(unsafe.Pointer(ReturnLength))) + return NtStatus(r0) +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go new file mode 100644 index 0000000000000..113ff2e9f4256 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/endian.go @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: BSD-3-Clause +package common + +import "unsafe" + +// IsLittleEndian checks if the current platform uses little-endian. +// copied from https://github.com/ntrrg/ntgo/blob/v0.8.0/runtime/infrastructure.go#L16 (MIT License) +func IsLittleEndian() bool { + var x int16 = 0x0011 + return *(*byte)(unsafe.Pointer(&x)) == 0x11 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go new file mode 100644 index 0000000000000..8108a1caca7b1 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/sleep.go @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: BSD-3-Clause +package common + +import ( + "context" + "time" +) + +// Sleep awaits for provided interval. +// Can be interrupted by context cancelation. 
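+// For illustration:
+//
+//	if err := Sleep(ctx, 500*time.Millisecond); err != nil {
+//		return err // ctx was canceled before the interval elapsed
+//	}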
+func Sleep(ctx context.Context, interval time.Duration) error { + timer := time.NewTimer(interval) + select { + case <-ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return ctx.Err() + case <-timer.C: + return nil + } +} diff --git a/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go new file mode 100644 index 0000000000000..888cc57faee38 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/internal/common/warnings.go @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: BSD-3-Clause +package common + +import "fmt" + +type Warnings struct { + List []error + Verbose bool +} + +func (w *Warnings) Add(err error) { + w.List = append(w.List, err) +} + +func (w *Warnings) Reference() error { + if len(w.List) > 0 { + return w + } + return nil +} + +func (w *Warnings) Error() string { + if w.Verbose { + str := "" + for i, e := range w.List { + str += fmt.Sprintf("\tError %d: %s\n", i, e.Error()) + } + return str + } + return fmt.Sprintf("Number of warnings: %v", len(w.List)) +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go new file mode 100644 index 0000000000000..0a12fe2fe34d6 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_linux.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux + +package mem + +import ( + "context" + "encoding/json" +) + +type ExVirtualMemory struct { + ActiveFile uint64 `json:"activefile"` + InactiveFile uint64 `json:"inactivefile"` + ActiveAnon uint64 `json:"activeanon"` + InactiveAnon uint64 `json:"inactiveanon"` + Unevictable uint64 `json:"unevictable"` +} + +func (v ExVirtualMemory) String() string { + s, _ := json.Marshal(v) + return string(s) +} + +type ExLinux struct{} + +func NewExLinux() *ExLinux { + return &ExLinux{} +} + +func (ex *ExLinux) VirtualMemory() (*ExVirtualMemory, error) { + return ex.VirtualMemoryWithContext(context.Background()) +} + +func (ex *ExLinux) VirtualMemoryWithContext(ctx context.Context) (*ExVirtualMemory, error) { + _, vmEx, err := fillFromMeminfoWithContext(ctx) + if err != nil { + return nil, err + } + return vmEx, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go new file mode 100644 index 0000000000000..4f1573b3c5b04 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/ex_windows.go @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package mem + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +// ExVirtualMemory represents Windows specific information +// https://learn.microsoft.com/en-us/windows/win32/api/sysinfoapi/ns-sysinfoapi-memorystatusex +type ExVirtualMemory struct { + VirtualTotal uint64 `json:"virtualTotal"` + VirtualAvail uint64 `json:"virtualAvail"` +} + +type ExWindows struct{} + +func NewExWindows() *ExWindows { + return &ExWindows{} +} + +func (e *ExWindows) VirtualMemory() (*ExVirtualMemory, error) { + var memInfo memoryStatusEx + memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) + mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) + if mem == 0 { + return nil, windows.GetLastError() + } + + ret := &ExVirtualMemory{ + VirtualTotal: memInfo.ullTotalVirtual, + VirtualAvail: memInfo.ullAvailVirtual, + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go new file mode 100644 index 
0000000000000..0da71a98863d1 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem.go @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: BSD-3-Clause +package mem + +import ( + "encoding/json" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +var invoke common.Invoker = common.Invoke{} + +// Memory usage statistics. Total, Available and Used contain numbers of bytes +// for human consumption. +// +// The other fields in this struct contain kernel specific values. +type VirtualMemoryStat struct { + // Total amount of RAM on this system + Total uint64 `json:"total"` + + // RAM available for programs to allocate + // + // This value is computed from the kernel specific values. + Available uint64 `json:"available"` + + // RAM used by programs + // + // This value is computed from the kernel specific values. + Used uint64 `json:"used"` + + // Percentage of RAM used by programs + // + // This value is computed from the kernel specific values. + UsedPercent float64 `json:"usedPercent"` + + // This is the kernel's notion of free memory; RAM chips whose bits nobody + // cares about the value of right now. For a human consumable number, + // Available is what you really want. + Free uint64 `json:"free"` + + // OS X / BSD specific numbers: + // http://www.macyourself.com/2010/02/17/what-is-free-wired-active-and-inactive-system-memory-ram/ + Active uint64 `json:"active"` + Inactive uint64 `json:"inactive"` + Wired uint64 `json:"wired"` + + // FreeBSD specific numbers: + // https://reviews.freebsd.org/D8467 + Laundry uint64 `json:"laundry"` + + // Linux specific numbers + // https://www.centos.org/docs/5/html/5.1/Deployment_Guide/s2-proc-meminfo.html + // https://www.kernel.org/doc/Documentation/filesystems/proc.txt + // https://www.kernel.org/doc/Documentation/vm/overcommit-accounting + // https://www.kernel.org/doc/Documentation/vm/transhuge.txt + Buffers uint64 `json:"buffers"` + Cached uint64 `json:"cached"` + WriteBack uint64 `json:"writeBack"` + Dirty uint64 `json:"dirty"` + WriteBackTmp uint64 `json:"writeBackTmp"` + Shared uint64 `json:"shared"` + Slab uint64 `json:"slab"` + Sreclaimable uint64 `json:"sreclaimable"` + Sunreclaim uint64 `json:"sunreclaim"` + PageTables uint64 `json:"pageTables"` + SwapCached uint64 `json:"swapCached"` + CommitLimit uint64 `json:"commitLimit"` + CommittedAS uint64 `json:"committedAS"` + HighTotal uint64 `json:"highTotal"` + HighFree uint64 `json:"highFree"` + LowTotal uint64 `json:"lowTotal"` + LowFree uint64 `json:"lowFree"` + SwapTotal uint64 `json:"swapTotal"` + SwapFree uint64 `json:"swapFree"` + Mapped uint64 `json:"mapped"` + VmallocTotal uint64 `json:"vmallocTotal"` + VmallocUsed uint64 `json:"vmallocUsed"` + VmallocChunk uint64 `json:"vmallocChunk"` + HugePagesTotal uint64 `json:"hugePagesTotal"` + HugePagesFree uint64 `json:"hugePagesFree"` + HugePagesRsvd uint64 `json:"hugePagesRsvd"` + HugePagesSurp uint64 `json:"hugePagesSurp"` + HugePageSize uint64 `json:"hugePageSize"` + AnonHugePages uint64 `json:"anonHugePages"` +} + +type SwapMemoryStat struct { + Total uint64 `json:"total"` + Used uint64 `json:"used"` + Free uint64 `json:"free"` + UsedPercent float64 `json:"usedPercent"` + Sin uint64 `json:"sin"` + Sout uint64 `json:"sout"` + PgIn uint64 `json:"pgIn"` + PgOut uint64 `json:"pgOut"` + PgFault uint64 `json:"pgFault"` + + // Linux specific numbers + // https://www.kernel.org/doc/Documentation/cgroup-v2.txt + PgMajFault uint64 `json:"pgMajFault"` +} + +func (m VirtualMemoryStat) String() string { + s, _ := json.Marshal(m) + return 
string(s) +} + +func (m SwapMemoryStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +type SwapDevice struct { + Name string `json:"name"` + UsedBytes uint64 `json:"usedBytes"` + FreeBytes uint64 `json:"freeBytes"` +} + +func (m SwapDevice) String() string { + s, _ := json.Marshal(m) + return string(s) +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go new file mode 100644 index 0000000000000..916bff30df3e1 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix.go @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix + +package mem + +import ( + "context" +) + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go new file mode 100644 index 0000000000000..2d03dd0c3f734 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_cgo.go @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix && cgo + +package mem + +import ( + "context" + + "github.com/power-devops/perfstat" +) + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + m, err := perfstat.MemoryTotalStat() + if err != nil { + return nil, err + } + pagesize := uint64(4096) + ret := VirtualMemoryStat{ + Total: uint64(m.RealTotal) * pagesize, + Available: uint64(m.RealAvailable) * pagesize, + Free: uint64(m.RealFree) * pagesize, + Used: uint64(m.RealInUse) * pagesize, + UsedPercent: 100 * float64(m.RealInUse) / float64(m.RealTotal), + Active: uint64(m.VirtualActive) * pagesize, + SwapTotal: uint64(m.PgSpTotal) * pagesize, + SwapFree: uint64(m.PgSpFree) * pagesize, + } + return &ret, nil +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + m, err := perfstat.MemoryTotalStat() + if err != nil { + return nil, err + } + pagesize := uint64(4096) + swapUsed := uint64(m.PgSpTotal-m.PgSpFree-m.PgSpRsvd) * pagesize + swapTotal := uint64(m.PgSpTotal) * pagesize + ret := SwapMemoryStat{ + Total: swapTotal, + Free: uint64(m.PgSpFree) * pagesize, + Used: swapUsed, + UsedPercent: float64(100*swapUsed) / float64(swapTotal), + Sin: uint64(m.PgSpIn), + Sout: uint64(m.PgSpOut), + PgIn: uint64(m.PageIn), + PgOut: uint64(m.PageOut), + PgFault: uint64(m.PageFaults), + } + return &ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go new file mode 100644 index 0000000000000..cfcc4f90f16e4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_aix_nocgo.go @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix && !cgo + +package mem + +import ( + "context" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + vmem, swap, err := callSVMon(ctx) + if err != nil { + return nil, err + } + if vmem.Total == 0 { + return nil, common.ErrNotImplementedError + } + vmem.SwapTotal = swap.Total + vmem.SwapFree = swap.Free + return vmem, nil +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + _, swap, err := callSVMon(ctx) + if err != nil { + return nil, err + } + if swap.Total == 0 { + return nil, 
common.ErrNotImplementedError + } + return swap, nil +} + +func callSVMon(ctx context.Context) (*VirtualMemoryStat, *SwapMemoryStat, error) { + out, err := invoke.CommandWithContext(ctx, "svmon", "-G") + if err != nil { + return nil, nil, err + } + + pagesize := uint64(4096) + vmem := &VirtualMemoryStat{} + swap := &SwapMemoryStat{} + for _, line := range strings.Split(string(out), "\n") { + if strings.HasPrefix(line, "memory") { + p := strings.Fields(line) + if len(p) > 2 { + if t, err := strconv.ParseUint(p[1], 10, 64); err == nil { + vmem.Total = t * pagesize + } + if t, err := strconv.ParseUint(p[2], 10, 64); err == nil { + vmem.Used = t * pagesize + if vmem.Total > 0 { + vmem.UsedPercent = 100 * float64(vmem.Used) / float64(vmem.Total) + } + } + if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { + vmem.Free = t * pagesize + } + } + } else if strings.HasPrefix(line, "pg space") { + p := strings.Fields(line) + if len(p) > 3 { + if t, err := strconv.ParseUint(p[2], 10, 64); err == nil { + swap.Total = t * pagesize + } + if t, err := strconv.ParseUint(p[3], 10, 64); err == nil { + swap.Free = swap.Total - t*pagesize + } + } + break + } + } + return vmem, swap, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go new file mode 100644 index 0000000000000..4f3e57c038ab0 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_bsd.go @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build freebsd || openbsd || netbsd + +package mem + +import ( + "context" + "fmt" + "strconv" + "strings" +) + +const swapCommand = "swapctl" + +// swapctl column indexes +const ( + nameCol = 0 + totalKiBCol = 1 + usedKiBCol = 2 +) + +func SwapDevices() ([]*SwapDevice, error) { + return SwapDevicesWithContext(context.Background()) +} + +func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { + output, err := invoke.CommandWithContext(ctx, swapCommand, "-lk") + if err != nil { + return nil, fmt.Errorf("could not execute %q: %w", swapCommand, err) + } + + return parseSwapctlOutput(string(output)) +} + +func parseSwapctlOutput(output string) ([]*SwapDevice, error) { + lines := strings.Split(output, "\n") + if len(lines) == 0 { + return nil, fmt.Errorf("could not parse output of %q: no lines in %q", swapCommand, output) + } + + // Check header headerFields are as expected. 
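+	// For illustration: after the lowercasing and ":" stripping below, the
+	// first three header fields must read "device", "1kb-blocks" (or
+	// "1k-blocks"), and "used" for the checks to pass.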
+ header := lines[0] + header = strings.ToLower(header) + header = strings.ReplaceAll(header, ":", "") + headerFields := strings.Fields(header) + if len(headerFields) < usedKiBCol { + return nil, fmt.Errorf("couldn't parse %q: too few fields in header %q", swapCommand, header) + } + if headerFields[nameCol] != "device" { + return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[nameCol], "device") + } + if headerFields[totalKiBCol] != "1kb-blocks" && headerFields[totalKiBCol] != "1k-blocks" { + return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[totalKiBCol], "1kb-blocks") + } + if headerFields[usedKiBCol] != "used" { + return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[usedKiBCol], "used") + } + + var swapDevices []*SwapDevice + for _, line := range lines[1:] { + if line == "" { + continue // the terminal line is typically empty + } + fields := strings.Fields(line) + if len(fields) < usedKiBCol { + return nil, fmt.Errorf("couldn't parse %q: too few fields", swapCommand) + } + + totalKiB, err := strconv.ParseUint(fields[totalKiBCol], 10, 64) + if err != nil { + return nil, fmt.Errorf("couldn't parse 'Size' column in %q: %w", swapCommand, err) + } + + usedKiB, err := strconv.ParseUint(fields[usedKiBCol], 10, 64) + if err != nil { + return nil, fmt.Errorf("couldn't parse 'Used' column in %q: %w", swapCommand, err) + } + + swapDevices = append(swapDevices, &SwapDevice{ + Name: fields[nameCol], + UsedBytes: usedKiB * 1024, + FreeBytes: (totalKiB - usedKiB) * 1024, + }) + } + + return swapDevices, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go new file mode 100644 index 0000000000000..a33c5f125a2b0 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin.go @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package mem + +import ( + "context" + "fmt" + "unsafe" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func getHwMemsize() (uint64, error) { + total, err := unix.SysctlUint64("hw.memsize") + if err != nil { + return 0, err + } + return total, nil +} + +// xsw_usage in sys/sysctl.h +type swapUsage struct { + Total uint64 + Avail uint64 + Used uint64 + Pagesize int32 + Encrypted bool +} + +// SwapMemory returns swapinfo. 
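+// The values are read from sysctl "vm.swapusage", and UsedPercent is
+// computed as (Total-Avail)/Total*100.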
+func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + // https://github.com/yanllearnn/go-osstat/blob/ae8a279d26f52ec946a03698c7f50a26cfb427e3/memory/memory_darwin.go + var ret *SwapMemoryStat + + value, err := unix.SysctlRaw("vm.swapusage") + if err != nil { + return ret, err + } + if len(value) != 32 { + return ret, fmt.Errorf("unexpected output of sysctl vm.swapusage: %v (len: %d)", value, len(value)) + } + swap := (*swapUsage)(unsafe.Pointer(&value[0])) + + u := float64(0) + if swap.Total != 0 { + u = ((float64(swap.Total) - float64(swap.Avail)) / float64(swap.Total)) * 100.0 + } + + ret = &SwapMemoryStat{ + Total: swap.Total, + Used: swap.Used, + Free: swap.Avail, + UsedPercent: u, + } + + return ret, nil +} + +func SwapDevices() ([]*SwapDevice, error) { + return SwapDevicesWithContext(context.Background()) +} + +func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_cgo.go new file mode 100644 index 0000000000000..cc6657d045c13 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_cgo.go @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && cgo + +package mem + +/* +#include +#include +*/ +import "C" + +import ( + "context" + "fmt" + "unsafe" +) + +// VirtualMemory returns VirtualmemoryStat. +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + count := C.mach_msg_type_number_t(C.HOST_VM_INFO_COUNT) + var vmstat C.vm_statistics_data_t + + status := C.host_statistics(C.host_t(C.mach_host_self()), + C.HOST_VM_INFO, + C.host_info_t(unsafe.Pointer(&vmstat)), + &count) + + if status != C.KERN_SUCCESS { + return nil, fmt.Errorf("host_statistics error=%d", status) + } + + pageSize := uint64(C.vm_kernel_page_size) + total, err := getHwMemsize() + if err != nil { + return nil, err + } + totalCount := C.natural_t(total / pageSize) + + availableCount := vmstat.inactive_count + vmstat.free_count + usedPercent := 100 * float64(totalCount-availableCount) / float64(totalCount) + + usedCount := totalCount - availableCount + + return &VirtualMemoryStat{ + Total: total, + Available: pageSize * uint64(availableCount), + Used: pageSize * uint64(usedCount), + UsedPercent: usedPercent, + Free: pageSize * uint64(vmstat.free_count), + Active: pageSize * uint64(vmstat.active_count), + Inactive: pageSize * uint64(vmstat.inactive_count), + Wired: pageSize * uint64(vmstat.wire_count), + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_nocgo.go new file mode 100644 index 0000000000000..097a93e63e4cc --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_darwin_nocgo.go @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && !cgo + +package mem + +import ( + "context" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +// Runs vm_stat and returns Free and inactive pages +func getVMStat(vms *VirtualMemoryStat) error { + out, err := invoke.Command("vm_stat") + if err != nil { + return err + } + return parseVMStat(string(out), vms) +} + +func parseVMStat(out string, vms *VirtualMemoryStat) 
error { + var err error + + lines := strings.Split(out, "\n") + pagesize := uint64(unix.Getpagesize()) + for _, line := range lines { + fields := strings.Split(line, ":") + if len(fields) < 2 { + continue + } + key := strings.TrimSpace(fields[0]) + value := strings.Trim(fields[1], " .") + switch key { + case "Pages free": + free, e := strconv.ParseUint(value, 10, 64) + if e != nil { + err = e + } + vms.Free = free * pagesize + case "Pages inactive": + inactive, e := strconv.ParseUint(value, 10, 64) + if e != nil { + err = e + } + vms.Inactive = inactive * pagesize + case "Pages active": + active, e := strconv.ParseUint(value, 10, 64) + if e != nil { + err = e + } + vms.Active = active * pagesize + case "Pages wired down": + wired, e := strconv.ParseUint(value, 10, 64) + if e != nil { + err = e + } + vms.Wired = wired * pagesize + } + } + return err +} + +// VirtualMemory returns VirtualmemoryStat. +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + ret := &VirtualMemoryStat{} + + total, err := getHwMemsize() + if err != nil { + return nil, err + } + err = getVMStat(ret) + if err != nil { + return nil, err + } + + ret.Available = ret.Free + ret.Inactive + ret.Total = total + + ret.Used = ret.Total - ret.Available + ret.UsedPercent = 100 * float64(ret.Used) / float64(ret.Total) + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go new file mode 100644 index 0000000000000..ba882c8bee935 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_fallback.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build !darwin && !linux && !freebsd && !openbsd && !solaris && !windows && !plan9 && !aix && !netbsd + +package mem + +import ( + "context" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + return nil, common.ErrNotImplementedError +} + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + return nil, common.ErrNotImplementedError +} + +func SwapDevices() ([]*SwapDevice, error) { + return SwapDevicesWithContext(context.Background()) +} + +func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go new file mode 100644 index 0000000000000..d9cae7116b240 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_freebsd.go @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build freebsd + +package mem + +import ( + "context" + "errors" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" + "golang.org/x/sys/unix" +) + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + pageSize, err := common.SysctlUint("vm.stats.vm.v_page_size") + if err != nil { + return nil, err + } + physmem, err := common.SysctlUint("hw.physmem") + if err != nil { + return nil, 
err + } + + free, err := common.SysctlUint("vm.stats.vm.v_free_count") + if err != nil { + return nil, err + } + active, err := common.SysctlUint("vm.stats.vm.v_active_count") + if err != nil { + return nil, err + } + inactive, err := common.SysctlUint("vm.stats.vm.v_inactive_count") + if err != nil { + return nil, err + } + buffers, err := common.SysctlUint("vfs.bufspace") + if err != nil { + return nil, err + } + wired, err := common.SysctlUint("vm.stats.vm.v_wire_count") + if err != nil { + return nil, err + } + var cached, laundry uint64 + osreldate, _ := common.SysctlUint("kern.osreldate") + if osreldate < 1102000 { + cached, err = common.SysctlUint("vm.stats.vm.v_cache_count") + if err != nil { + return nil, err + } + } else { + laundry, err = common.SysctlUint("vm.stats.vm.v_laundry_count") + if err != nil { + return nil, err + } + } + + p := pageSize + ret := &VirtualMemoryStat{ + Total: physmem, + Free: free * p, + Active: active * p, + Inactive: inactive * p, + Cached: cached * p, + Buffers: buffers, + Wired: wired * p, + Laundry: laundry * p, + } + + ret.Available = ret.Inactive + ret.Cached + ret.Free + ret.Laundry + ret.Used = ret.Total - ret.Available + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 + + return ret, nil +} + +// Return swapinfo +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +// Constants from vm/vm_param.h +// nolint: golint +const ( + XSWDEV_VERSION11 = 1 + XSWDEV_VERSION = 2 +) + +// Types from vm/vm_param.h +type xswdev struct { + Version uint32 // Version is the version + Dev uint64 // Dev is the device identifier + Flags int32 // Flags is the swap flags applied to the device + NBlks int32 // NBlks is the total number of blocks + Used int32 // Used is the number of blocks used +} + +// xswdev11 is a compatibility for under FreeBSD 11 +// sys/vm/swap_pager.c +type xswdev11 struct { + Version uint32 // Version is the version + Dev uint32 // Dev is the device identifier + Flags int32 // Flags is the swap flags applied to the device + NBlks int32 // NBlks is the total number of blocks + Used int32 // Used is the number of blocks used +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + // FreeBSD can have multiple swap devices so we total them up + i, err := common.SysctlUint("vm.nswapdev") + if err != nil { + return nil, err + } + + if i == 0 { + return nil, errors.New("no swap devices found") + } + + c := int(i) + + i, err = common.SysctlUint("vm.stats.vm.v_page_size") + if err != nil { + return nil, err + } + pageSize := i + + var buf []byte + s := &SwapMemoryStat{} + for n := 0; n < c; n++ { + buf, err = unix.SysctlRaw("vm.swap_info", n) + if err != nil { + return nil, err + } + + // first, try to parse with version 2 + xsw := (*xswdev)(unsafe.Pointer(&buf[0])) + if xsw.Version == XSWDEV_VERSION11 { + // this is version 1, so try to parse again + xsw := (*xswdev11)(unsafe.Pointer(&buf[0])) + if xsw.Version != XSWDEV_VERSION11 { + return nil, errors.New("xswdev version mismatch(11)") + } + s.Total += uint64(xsw.NBlks) + s.Used += uint64(xsw.Used) + } else if xsw.Version != XSWDEV_VERSION { + return nil, errors.New("xswdev version mismatch") + } else { + s.Total += uint64(xsw.NBlks) + s.Used += uint64(xsw.Used) + } + + } + + if s.Total != 0 { + s.UsedPercent = float64(s.Used) / float64(s.Total) * 100 + } + s.Total *= pageSize + s.Used *= pageSize + s.Free = s.Total - s.Used + + return s, nil +} diff --git 
a/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go new file mode 100644 index 0000000000000..05bfdaf4e1af6 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_linux.go @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux + +package mem + +import ( + "bufio" + "context" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + vm, _, err := fillFromMeminfoWithContext(ctx) + if err != nil { + return nil, err + } + return vm, nil +} + +func fillFromMeminfoWithContext(ctx context.Context) (*VirtualMemoryStat, *ExVirtualMemory, error) { + filename := common.HostProcWithContext(ctx, "meminfo") + lines, _ := common.ReadLines(filename) + + // flag if MemAvailable is in /proc/meminfo (kernel 3.14+) + memavail := false + activeFile := false // "Active(file)" not available: 2.6.28 / Dec 2008 + inactiveFile := false // "Inactive(file)" not available: 2.6.28 / Dec 2008 + sReclaimable := false // "Sreclaimable:" not available: 2.6.19 / Nov 2006 + + ret := &VirtualMemoryStat{} + retEx := &ExVirtualMemory{} + + for _, line := range lines { + fields := strings.Split(line, ":") + if len(fields) != 2 { + continue + } + key := strings.TrimSpace(fields[0]) + value := strings.TrimSpace(fields[1]) + value = strings.Replace(value, " kB", "", -1) + + switch key { + case "MemTotal": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.Total = t * 1024 + case "MemFree": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.Free = t * 1024 + case "MemAvailable": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + memavail = true + ret.Available = t * 1024 + case "Buffers": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.Buffers = t * 1024 + case "Cached": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.Cached = t * 1024 + case "Active": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.Active = t * 1024 + case "Inactive": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.Inactive = t * 1024 + case "Active(anon)": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + retEx.ActiveAnon = t * 1024 + case "Inactive(anon)": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + retEx.InactiveAnon = t * 1024 + case "Active(file)": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + activeFile = true + retEx.ActiveFile = t * 1024 + case "Inactive(file)": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + inactiveFile = true + retEx.InactiveFile = t * 1024 + case "Unevictable": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + retEx.Unevictable = t * 1024 + case "Writeback": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.WriteBack = t * 1024 + case "WritebackTmp": + t, err := 
strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.WriteBackTmp = t * 1024 + case "Dirty": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.Dirty = t * 1024 + case "Shmem": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.Shared = t * 1024 + case "Slab": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.Slab = t * 1024 + case "SReclaimable": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + sReclaimable = true + ret.Sreclaimable = t * 1024 + case "SUnreclaim": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.Sunreclaim = t * 1024 + case "PageTables": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.PageTables = t * 1024 + case "SwapCached": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.SwapCached = t * 1024 + case "CommitLimit": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.CommitLimit = t * 1024 + case "Committed_AS": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.CommittedAS = t * 1024 + case "HighTotal": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.HighTotal = t * 1024 + case "HighFree": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.HighFree = t * 1024 + case "LowTotal": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.LowTotal = t * 1024 + case "LowFree": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.LowFree = t * 1024 + case "SwapTotal": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.SwapTotal = t * 1024 + case "SwapFree": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.SwapFree = t * 1024 + case "Mapped": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.Mapped = t * 1024 + case "VmallocTotal": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.VmallocTotal = t * 1024 + case "VmallocUsed": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.VmallocUsed = t * 1024 + case "VmallocChunk": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.VmallocChunk = t * 1024 + case "HugePages_Total": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.HugePagesTotal = t + case "HugePages_Free": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.HugePagesFree = t + case "HugePages_Rsvd": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.HugePagesRsvd = t + case "HugePages_Surp": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.HugePagesSurp = t + case "Hugepagesize": + t, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return ret, retEx, err + } + ret.HugePageSize = t * 1024 + case "AnonHugePages": + t, err := strconv.ParseUint(value, 10, 64) + if 
err != nil { + return ret, retEx, err + } + ret.AnonHugePages = t * 1024 + } + } + + ret.Cached += ret.Sreclaimable + + if !memavail { + if activeFile && inactiveFile && sReclaimable { + ret.Available = calculateAvailVmem(ctx, ret, retEx) + } else { + ret.Available = ret.Cached + ret.Free + } + } + + ret.Used = ret.Total - ret.Free - ret.Buffers - ret.Cached + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 + + return ret, retEx, nil +} + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + sysinfo := &unix.Sysinfo_t{} + + if err := unix.Sysinfo(sysinfo); err != nil { + return nil, err + } + ret := &SwapMemoryStat{ + Total: uint64(sysinfo.Totalswap) * uint64(sysinfo.Unit), + Free: uint64(sysinfo.Freeswap) * uint64(sysinfo.Unit), + } + ret.Used = ret.Total - ret.Free + // check Infinity + if ret.Total != 0 { + ret.UsedPercent = float64(ret.Total-ret.Free) / float64(ret.Total) * 100.0 + } else { + ret.UsedPercent = 0 + } + filename := common.HostProcWithContext(ctx, "vmstat") + lines, _ := common.ReadLines(filename) + for _, l := range lines { + fields := strings.Fields(l) + if len(fields) < 2 { + continue + } + switch fields[0] { + case "pswpin": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.Sin = value * 4 * 1024 + case "pswpout": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.Sout = value * 4 * 1024 + case "pgpgin": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.PgIn = value * 4 * 1024 + case "pgpgout": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.PgOut = value * 4 * 1024 + case "pgfault": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.PgFault = value * 4 * 1024 + case "pgmajfault": + value, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + ret.PgMajFault = value * 4 * 1024 + } + } + return ret, nil +} + +// calculateAvailVmem is a fallback under kernel 3.14 where /proc/meminfo does not provide +// "MemAvailable:" column. 
It reimplements an algorithm from the link below +// https://github.com/giampaolo/psutil/pull/890 +func calculateAvailVmem(ctx context.Context, ret *VirtualMemoryStat, retEx *ExVirtualMemory) uint64 { + var watermarkLow uint64 + + fn := common.HostProcWithContext(ctx, "zoneinfo") + lines, err := common.ReadLines(fn) + if err != nil { + return ret.Free + ret.Cached // fallback under kernel 2.6.13 + } + + pagesize := uint64(os.Getpagesize()) + watermarkLow = 0 + + for _, line := range lines { + fields := strings.Fields(line) + + if strings.HasPrefix(fields[0], "low") { + lowValue, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + lowValue = 0 + } + watermarkLow += lowValue + } + } + + watermarkLow *= pagesize + + availMemory := ret.Free - watermarkLow + pageCache := retEx.ActiveFile + retEx.InactiveFile + pageCache -= uint64(math.Min(float64(pageCache/2), float64(watermarkLow))) + availMemory += pageCache + availMemory += ret.Sreclaimable - uint64(math.Min(float64(ret.Sreclaimable/2.0), float64(watermarkLow))) + + if availMemory < 0 { + availMemory = 0 + } + + return availMemory +} + +const swapsFilename = "swaps" + +// swaps file column indexes +const ( + nameCol = 0 + // typeCol = 1 + totalCol = 2 + usedCol = 3 + // priorityCol = 4 +) + +func SwapDevices() ([]*SwapDevice, error) { + return SwapDevicesWithContext(context.Background()) +} + +func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { + swapsFilePath := common.HostProcWithContext(ctx, swapsFilename) + f, err := os.Open(swapsFilePath) + if err != nil { + return nil, err + } + defer f.Close() + + return parseSwapsFile(ctx, f) +} + +func parseSwapsFile(ctx context.Context, r io.Reader) ([]*SwapDevice, error) { + swapsFilePath := common.HostProcWithContext(ctx, swapsFilename) + scanner := bufio.NewScanner(r) + if !scanner.Scan() { + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("couldn't read file %q: %w", swapsFilePath, err) + } + return nil, fmt.Errorf("unexpected end-of-file in %q", swapsFilePath) + + } + + // Check header headerFields are as expected + headerFields := strings.Fields(scanner.Text()) + if len(headerFields) < usedCol { + return nil, fmt.Errorf("couldn't parse %q: too few fields in header", swapsFilePath) + } + if headerFields[nameCol] != "Filename" { + return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapsFilePath, headerFields[nameCol], "Filename") + } + if headerFields[totalCol] != "Size" { + return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapsFilePath, headerFields[totalCol], "Size") + } + if headerFields[usedCol] != "Used" { + return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapsFilePath, headerFields[usedCol], "Used") + } + + var swapDevices []*SwapDevice + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) < usedCol { + return nil, fmt.Errorf("couldn't parse %q: too few fields", swapsFilePath) + } + + totalKiB, err := strconv.ParseUint(fields[totalCol], 10, 64) + if err != nil { + return nil, fmt.Errorf("couldn't parse 'Size' column in %q: %w", swapsFilePath, err) + } + + usedKiB, err := strconv.ParseUint(fields[usedCol], 10, 64) + if err != nil { + return nil, fmt.Errorf("couldn't parse 'Used' column in %q: %w", swapsFilePath, err) + } + + swapDevices = append(swapDevices, &SwapDevice{ + Name: fields[nameCol], + UsedBytes: usedKiB * 1024, + FreeBytes: (totalKiB - usedKiB) * 1024, + }) + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("couldn't read file %q: 
%w", swapsFilePath, err) + } + + return swapDevices, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go new file mode 100644 index 0000000000000..0a41b3e340e5f --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_netbsd.go @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build netbsd + +package mem + +import ( + "context" + "errors" + "fmt" + + "golang.org/x/sys/unix" +) + +func GetPageSize() (uint64, error) { + return GetPageSizeWithContext(context.Background()) +} + +func GetPageSizeWithContext(ctx context.Context) (uint64, error) { + uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") + if err != nil { + return 0, err + } + return uint64(uvmexp.Pagesize), nil +} + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + uvmexp, err := unix.SysctlUvmexp("vm.uvmexp2") + if err != nil { + return nil, err + } + p := uint64(uvmexp.Pagesize) + + ret := &VirtualMemoryStat{ + Total: uint64(uvmexp.Npages) * p, + Free: uint64(uvmexp.Free) * p, + Active: uint64(uvmexp.Active) * p, + Inactive: uint64(uvmexp.Inactive) * p, + Cached: 0, // not available + Wired: uint64(uvmexp.Wired) * p, + } + + ret.Available = ret.Inactive + ret.Cached + ret.Free + ret.Used = ret.Total - ret.Available + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 + + // Get buffers from vm.bufmem sysctl + ret.Buffers, err = unix.SysctlUint64("vm.bufmem") + if err != nil { + return nil, err + } + + return ret, nil +} + +// Return swapctl summary info +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + out, err := invoke.CommandWithContext(ctx, "swapctl", "-sk") + if err != nil { + return &SwapMemoryStat{}, nil + } + + line := string(out) + var total, used, free uint64 + + _, err = fmt.Sscanf(line, + "total: %d 1K-blocks allocated, %d used, %d available", + &total, &used, &free) + if err != nil { + return nil, errors.New("failed to parse swapctl output") + } + + percent := float64(used) / float64(total) * 100 + return &SwapMemoryStat{ + Total: total * 1024, + Used: used * 1024, + Free: free * 1024, + UsedPercent: percent, + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go new file mode 100644 index 0000000000000..2510bb0d3aa89 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd.go @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd + +package mem + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + + "github.com/shirou/gopsutil/v4/internal/common" + "golang.org/x/sys/unix" +) + +func GetPageSize() (uint64, error) { + return GetPageSizeWithContext(context.Background()) +} + +func GetPageSizeWithContext(ctx context.Context) (uint64, error) { + uvmexp, err := unix.SysctlUvmexp("vm.uvmexp") + if err != nil { + return 0, err + } + return uint64(uvmexp.Pagesize), nil +} + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + uvmexp, err := unix.SysctlUvmexp("vm.uvmexp") + if err != nil { + return nil, err + } + p := uint64(uvmexp.Pagesize) + + ret := 
&VirtualMemoryStat{ + Total: uint64(uvmexp.Npages) * p, + Free: uint64(uvmexp.Free) * p, + Active: uint64(uvmexp.Active) * p, + Inactive: uint64(uvmexp.Inactive) * p, + Cached: 0, // not available + Wired: uint64(uvmexp.Wired) * p, + } + + ret.Available = ret.Inactive + ret.Cached + ret.Free + ret.Used = ret.Total - ret.Available + ret.UsedPercent = float64(ret.Used) / float64(ret.Total) * 100.0 + + mib := []int32{CTLVfs, VfsGeneric, VfsBcacheStat} + buf, length, err := common.CallSyscall(mib) + if err != nil { + return nil, err + } + if length < sizeOfBcachestats { + return nil, fmt.Errorf("short syscall ret %d bytes", length) + } + var bcs Bcachestats + br := bytes.NewReader(buf) + err = common.Read(br, binary.LittleEndian, &bcs) + if err != nil { + return nil, err + } + ret.Buffers = uint64(bcs.Numbufpages) * p + + return ret, nil +} + +// Return swapctl summary info +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + out, err := invoke.CommandWithContext(ctx, "swapctl", "-sk") + if err != nil { + return &SwapMemoryStat{}, nil + } + + line := string(out) + var total, used, free uint64 + + _, err = fmt.Sscanf(line, + "total: %d 1K-blocks allocated, %d used, %d available", + &total, &used, &free) + if err != nil { + return nil, errors.New("failed to parse swapctl output") + } + + percent := float64(used) / float64(total) * 100 + return &SwapMemoryStat{ + Total: total * 1024, + Used: used * 1024, + Free: free * 1024, + UsedPercent: percent, + }, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go new file mode 100644 index 0000000000000..552e93f4a2819 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_386.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd && 386 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs mem/types_openbsd.go + +package mem + +const ( + CTLVfs = 10 + VfsGeneric = 0 + VfsBcacheStat = 3 +) + +const ( + sizeOfBcachestats = 0x90 +) + +type Bcachestats struct { + Numbufs int64 + Numbufpages int64 + Numdirtypages int64 + Numcleanpages int64 + Pendingwrites int64 + Pendingreads int64 + Numwrites int64 + Numreads int64 + Cachehits int64 + Busymapped int64 + Dmapages int64 + Highpages int64 + Delwribufs int64 + Kvaslots int64 + Avail int64 + Highflips int64 + Highflops int64 + Dmaflips int64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go new file mode 100644 index 0000000000000..73e5b72aa67f2 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_amd64.go @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package mem + +const ( + CTLVfs = 10 + VfsGeneric = 0 + VfsBcacheStat = 3 +) + +const ( + sizeOfBcachestats = 0x78 +) + +type Bcachestats struct { + Numbufs int64 + Numbufpages int64 + Numdirtypages int64 + Numcleanpages int64 + Pendingwrites int64 + Pendingreads int64 + Numwrites int64 + Numreads int64 + Cachehits int64 + Busymapped int64 + Dmapages int64 + Highpages int64 + Delwribufs int64 + Kvaslots int64 + Avail int64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go new file mode 100644 index 0000000000000..57b5861de5bb8 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd && arm + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs mem/types_openbsd.go + +package mem + +const ( + CTLVfs = 10 + VfsGeneric = 0 + VfsBcacheStat = 3 +) + +const ( + sizeOfBcachestats = 0x90 +) + +type Bcachestats struct { + Numbufs int64 + Numbufpages int64 + Numdirtypages int64 + Numcleanpages int64 + Pendingwrites int64 + Pendingreads int64 + Numwrites int64 + Numreads int64 + Cachehits int64 + Busymapped int64 + Dmapages int64 + Highpages int64 + Delwribufs int64 + Kvaslots int64 + Avail int64 + Highflips int64 + Highflops int64 + Dmaflips int64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go new file mode 100644 index 0000000000000..f39a6456b738a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_arm64.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd && arm64 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs mem/types_openbsd.go + +package mem + +const ( + CTLVfs = 10 + VfsGeneric = 0 + VfsBcacheStat = 3 +) + +const ( + sizeOfBcachestats = 0x90 +) + +type Bcachestats struct { + Numbufs int64 + Numbufpages int64 + Numdirtypages int64 + Numcleanpages int64 + Pendingwrites int64 + Pendingreads int64 + Numwrites int64 + Numreads int64 + Cachehits int64 + Busymapped int64 + Dmapages int64 + Highpages int64 + Delwribufs int64 + Kvaslots int64 + Avail int64 + Highflips int64 + Highflops int64 + Dmaflips int64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go new file mode 100644 index 0000000000000..f9f838f54ed72 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_openbsd_riscv64.go @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd && riscv64 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs mem/types_openbsd.go + +package mem + +const ( + CTLVfs = 10 + VfsGeneric = 0 + VfsBcacheStat = 3 +) + +const ( + sizeOfBcachestats = 0x90 +) + +type Bcachestats struct { + Numbufs int64 + Numbufpages int64 + Numdirtypages int64 + Numcleanpages int64 + Pendingwrites int64 + Pendingreads int64 + Numwrites int64 + Numreads int64 + Cachehits int64 + Busymapped int64 + Dmapages int64 + Highpages int64 + Delwribufs int64 + Kvaslots int64 + Avail int64 + Highflips int64 + Highflops int64 + Dmaflips int64 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go new file mode 100644 index 0000000000000..c17a102ee6259 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_plan9.go @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build plan9 + +package mem + +import ( + "context" + "os" + + stats "github.com/lufia/plan9stats" + "github.com/shirou/gopsutil/v4/internal/common" +) + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + root := os.Getenv("HOST_ROOT") + m, err := stats.ReadMemStats(ctx, stats.WithRootDir(root)) + if err != nil { + return nil, err + } + u := 0.0 + if m.SwapPages.Avail != 0 { + u = float64(m.SwapPages.Used) / float64(m.SwapPages.Avail) * 100.0 + } + return &SwapMemoryStat{ + Total: uint64(m.SwapPages.Avail * m.PageSize), + Used: uint64(m.SwapPages.Used * m.PageSize), + Free: uint64(m.SwapPages.Free() * m.PageSize), + UsedPercent: u, + }, nil +} + +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + root := os.Getenv("HOST_ROOT") + m, err := stats.ReadMemStats(ctx, stats.WithRootDir(root)) + if err != nil { + return nil, err + } + u := 0.0 + if m.UserPages.Avail != 0 { + u = float64(m.UserPages.Used) / float64(m.UserPages.Avail) * 100.0 + } + return &VirtualMemoryStat{ + Total: uint64(m.Total), + Available: uint64(m.UserPages.Free() * m.PageSize), + Used: uint64(m.UserPages.Used * m.PageSize), + UsedPercent: u, + Free: uint64(m.UserPages.Free() * m.PageSize), + + SwapTotal: uint64(m.SwapPages.Avail * m.PageSize), + SwapFree: uint64(m.SwapPages.Free() * m.PageSize), + }, nil +} + +func SwapDevices() ([]*SwapDevice, error) { + return SwapDevicesWithContext(context.Background()) +} + +func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { + return 
nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go new file mode 100644 index 0000000000000..06d0d9a006be3 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_solaris.go @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build solaris + +package mem + +import ( + "context" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/tklauser/go-sysconf" +) + +// VirtualMemory for Solaris is a minimal implementation which only returns +// what Nomad needs. It does take into account global vs zone, however. +func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + result := &VirtualMemoryStat{} + + zoneName, err := zoneName() + if err != nil { + return nil, err + } + + if zoneName == "global" { + cap, err := globalZoneMemoryCapacity() + if err != nil { + return nil, err + } + result.Total = cap + freemem, err := globalZoneFreeMemory(ctx) + if err != nil { + return nil, err + } + result.Available = freemem + result.Free = freemem + result.Used = result.Total - result.Free + } else { + cap, err := nonGlobalZoneMemoryCapacity() + if err != nil { + return nil, err + } + result.Total = cap + } + + return result, nil +} + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + return nil, common.ErrNotImplementedError +} + +func zoneName() (string, error) { + ctx := context.Background() + out, err := invoke.CommandWithContext(ctx, "zonename") + if err != nil { + return "", err + } + + return strings.TrimSpace(string(out)), nil +} + +var globalZoneMemoryCapacityMatch = regexp.MustCompile(`[Mm]emory size: (\d+) Megabytes`) + +func globalZoneMemoryCapacity() (uint64, error) { + ctx := context.Background() + out, err := invoke.CommandWithContext(ctx, "prtconf") + if err != nil { + return 0, err + } + + match := globalZoneMemoryCapacityMatch.FindAllStringSubmatch(string(out), -1) + if len(match) != 1 { + return 0, errors.New("memory size not contained in output of prtconf") + } + + totalMB, err := strconv.ParseUint(match[0][1], 10, 64) + if err != nil { + return 0, err + } + + return totalMB * 1024 * 1024, nil +} + +func globalZoneFreeMemory(ctx context.Context) (uint64, error) { + output, err := invoke.CommandWithContext(ctx, "pagesize") + if err != nil { + return 0, err + } + + pagesize, err := strconv.ParseUint(strings.TrimSpace(string(output)), 10, 64) + if err != nil { + return 0, err + } + + free, err := sysconf.Sysconf(sysconf.SC_AVPHYS_PAGES) + if err != nil { + return 0, err + } + + return uint64(free) * pagesize, nil +} + +var kstatMatch = regexp.MustCompile(`(\S+)\s+(\S*)`) + +func nonGlobalZoneMemoryCapacity() (uint64, error) { + ctx := context.Background() + out, err := invoke.CommandWithContext(ctx, "kstat", "-p", "-c", "zone_memory_cap", "memory_cap:*:*:physcap") + if err != nil { + return 0, err + } + + kstats := kstatMatch.FindAllStringSubmatch(string(out), -1) + if len(kstats) != 1 { + return 0, fmt.Errorf("expected 1 kstat, found %d", len(kstats)) + } + + memSizeBytes, err := strconv.ParseUint(kstats[0][2], 10, 64) + if err != nil { + return 0, err + } + + return memSizeBytes, nil +} + +const swapCommand = "swap" + +// The 
blockSize as reported by `swap -l`. See https://docs.oracle.com/cd/E23824_01/html/821-1459/fsswap-52195.html +const blockSize = 512 + +// swapctl column indexes +const ( + nameCol = 0 + // devCol = 1 + // swaploCol = 2 + totalBlocksCol = 3 + freeBlocksCol = 4 +) + +func SwapDevices() ([]*SwapDevice, error) { + return SwapDevicesWithContext(context.Background()) +} + +func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { + output, err := invoke.CommandWithContext(ctx, swapCommand, "-l") + if err != nil { + return nil, fmt.Errorf("could not execute %q: %w", swapCommand, err) + } + + return parseSwapsCommandOutput(string(output)) +} + +func parseSwapsCommandOutput(output string) ([]*SwapDevice, error) { + lines := strings.Split(output, "\n") + if len(lines) == 0 { + return nil, fmt.Errorf("could not parse output of %q: no lines in %q", swapCommand, output) + } + + // Check header headerFields are as expected. + headerFields := strings.Fields(lines[0]) + if len(headerFields) < freeBlocksCol { + return nil, fmt.Errorf("couldn't parse %q: too few fields in header %q", swapCommand, lines[0]) + } + if headerFields[nameCol] != "swapfile" { + return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[nameCol], "swapfile") + } + if headerFields[totalBlocksCol] != "blocks" { + return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[totalBlocksCol], "blocks") + } + if headerFields[freeBlocksCol] != "free" { + return nil, fmt.Errorf("couldn't parse %q: expected %q to be %q", swapCommand, headerFields[freeBlocksCol], "free") + } + + var swapDevices []*SwapDevice + for _, line := range lines[1:] { + if line == "" { + continue // the terminal line is typically empty + } + fields := strings.Fields(line) + if len(fields) < freeBlocksCol { + return nil, fmt.Errorf("couldn't parse %q: too few fields", swapCommand) + } + + totalBlocks, err := strconv.ParseUint(fields[totalBlocksCol], 10, 64) + if err != nil { + return nil, fmt.Errorf("couldn't parse 'Size' column in %q: %w", swapCommand, err) + } + + freeBlocks, err := strconv.ParseUint(fields[freeBlocksCol], 10, 64) + if err != nil { + return nil, fmt.Errorf("couldn't parse 'Used' column in %q: %w", swapCommand, err) + } + + swapDevices = append(swapDevices, &SwapDevice{ + Name: fields[nameCol], + UsedBytes: (totalBlocks - freeBlocks) * blockSize, + FreeBytes: freeBlocks * blockSize, + }) + } + + return swapDevices, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go new file mode 100644 index 0000000000000..4666cbd01e8bc --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/mem/mem_windows.go @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package mem + +import ( + "context" + "sync" + "syscall" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" + "golang.org/x/sys/windows" +) + +var ( + procEnumPageFilesW = common.ModPsapi.NewProc("EnumPageFilesW") + procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + procGetPerformanceInfo = common.ModPsapi.NewProc("GetPerformanceInfo") + procGlobalMemoryStatusEx = common.Modkernel32.NewProc("GlobalMemoryStatusEx") +) + +type memoryStatusEx struct { + cbSize uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 // in bytes + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + 
+func VirtualMemory() (*VirtualMemoryStat, error) { + return VirtualMemoryWithContext(context.Background()) +} + +func VirtualMemoryWithContext(ctx context.Context) (*VirtualMemoryStat, error) { + var memInfo memoryStatusEx + memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) + mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))) + if mem == 0 { + return nil, windows.GetLastError() + } + + ret := &VirtualMemoryStat{ + Total: memInfo.ullTotalPhys, + Available: memInfo.ullAvailPhys, + Free: memInfo.ullAvailPhys, + UsedPercent: float64(memInfo.dwMemoryLoad), + } + + ret.Used = ret.Total - ret.Available + return ret, nil +} + +type performanceInformation struct { + cb uint32 + commitTotal uint64 + commitLimit uint64 + commitPeak uint64 + physicalTotal uint64 + physicalAvailable uint64 + systemCache uint64 + kernelTotal uint64 + kernelPaged uint64 + kernelNonpaged uint64 + pageSize uint64 + handleCount uint32 + processCount uint32 + threadCount uint32 +} + +func SwapMemory() (*SwapMemoryStat, error) { + return SwapMemoryWithContext(context.Background()) +} + +func SwapMemoryWithContext(ctx context.Context) (*SwapMemoryStat, error) { + var perfInfo performanceInformation + perfInfo.cb = uint32(unsafe.Sizeof(perfInfo)) + mem, _, _ := procGetPerformanceInfo.Call(uintptr(unsafe.Pointer(&perfInfo)), uintptr(perfInfo.cb)) + if mem == 0 { + return nil, windows.GetLastError() + } + tot := perfInfo.commitLimit * perfInfo.pageSize + used := perfInfo.commitTotal * perfInfo.pageSize + free := tot - used + var usedPercent float64 + if tot == 0 { + usedPercent = 0 + } else { + usedPercent = float64(used) / float64(tot) * 100 + } + ret := &SwapMemoryStat{ + Total: tot, + Used: used, + Free: free, + UsedPercent: usedPercent, + } + + return ret, nil +} + +var ( + pageSize uint64 + pageSizeOnce sync.Once +) + +type systemInfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwPageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +// system type as defined in https://docs.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-enum_page_file_information +type enumPageFileInformation struct { + cb uint32 + reserved uint32 + totalSize uint64 + totalInUse uint64 + peakUsage uint64 +} + +func SwapDevices() ([]*SwapDevice, error) { + return SwapDevicesWithContext(context.Background()) +} + +func SwapDevicesWithContext(ctx context.Context) ([]*SwapDevice, error) { + pageSizeOnce.Do(func() { + var sysInfo systemInfo + procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&sysInfo))) + pageSize = uint64(sysInfo.dwPageSize) + }) + + // the following system call invokes the supplied callback function once for each page file before returning + // see https://docs.microsoft.com/en-us/windows/win32/api/psapi/nf-psapi-enumpagefilesw + var swapDevices []*SwapDevice + result, _, _ := procEnumPageFilesW.Call(windows.NewCallback(pEnumPageFileCallbackW), uintptr(unsafe.Pointer(&swapDevices))) + if result == 0 { + return nil, windows.GetLastError() + } + + return swapDevices, nil +} + +// system callback as defined in https://docs.microsoft.com/en-us/windows/win32/api/psapi/nc-psapi-penum_page_file_callbackw +func pEnumPageFileCallbackW(swapDevices *[]*SwapDevice, enumPageFileInfo *enumPageFileInformation, lpFilenamePtr *[syscall.MAX_LONG_PATH]uint16) *bool { + *swapDevices = 
append(*swapDevices, &SwapDevice{ + Name: syscall.UTF16ToString((*lpFilenamePtr)[:]), + UsedBytes: enumPageFileInfo.totalInUse * pageSize, + FreeBytes: (enumPageFileInfo.totalSize - enumPageFileInfo.totalInUse) * pageSize, + }) + + // return true to continue enumerating page files + ret := true + return &ret +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net.go b/vendor/github.com/shirou/gopsutil/v4/net/net.go new file mode 100644 index 0000000000000..3890eda5308fe --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net.go @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: BSD-3-Clause +package net + +import ( + "context" + "encoding/json" + "net" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +var invoke common.Invoker = common.Invoke{} + +type IOCountersStat struct { + Name string `json:"name"` // interface name + BytesSent uint64 `json:"bytesSent"` // number of bytes sent + BytesRecv uint64 `json:"bytesRecv"` // number of bytes received + PacketsSent uint64 `json:"packetsSent"` // number of packets sent + PacketsRecv uint64 `json:"packetsRecv"` // number of packets received + Errin uint64 `json:"errin"` // total number of errors while receiving + Errout uint64 `json:"errout"` // total number of errors while sending + Dropin uint64 `json:"dropin"` // total number of incoming packets which were dropped + Dropout uint64 `json:"dropout"` // total number of outgoing packets which were dropped (always 0 on OSX and BSD) + Fifoin uint64 `json:"fifoin"` // total number of FIFO buffers errors while receiving + Fifoout uint64 `json:"fifoout"` // total number of FIFO buffers errors while sending +} + +// Addr is implemented compatibility to psutil +type Addr struct { + IP string `json:"ip"` + Port uint32 `json:"port"` +} + +type ConnectionStat struct { + Fd uint32 `json:"fd"` + Family uint32 `json:"family"` + Type uint32 `json:"type"` + Laddr Addr `json:"localaddr"` + Raddr Addr `json:"remoteaddr"` + Status string `json:"status"` + Uids []int32 `json:"uids"` + Pid int32 `json:"pid"` +} + +// System wide stats about different network protocols +type ProtoCountersStat struct { + Protocol string `json:"protocol"` + Stats map[string]int64 `json:"stats"` +} + +// NetInterfaceAddr is designed for represent interface addresses +type InterfaceAddr struct { + Addr string `json:"addr"` +} + +// InterfaceAddrList is a list of InterfaceAddr +type InterfaceAddrList []InterfaceAddr + +type InterfaceStat struct { + Index int `json:"index"` + MTU int `json:"mtu"` // maximum transmission unit + Name string `json:"name"` // e.g., "en0", "lo0", "eth0.100" + HardwareAddr string `json:"hardwareAddr"` // IEEE MAC-48, EUI-48 and EUI-64 form + Flags []string `json:"flags"` // e.g., FlagUp, FlagLoopback, FlagMulticast + Addrs InterfaceAddrList `json:"addrs"` +} + +// InterfaceStatList is a list of InterfaceStat +type InterfaceStatList []InterfaceStat + +type FilterStat struct { + ConnTrackCount int64 `json:"connTrackCount"` + ConnTrackMax int64 `json:"connTrackMax"` +} + +// ConntrackStat has conntrack summary info +type ConntrackStat struct { + Entries uint32 `json:"entries"` // Number of entries in the conntrack table + Searched uint32 `json:"searched"` // Number of conntrack table lookups performed + Found uint32 `json:"found"` // Number of searched entries which were successful + New uint32 `json:"new"` // Number of entries added which were not expected before + Invalid uint32 `json:"invalid"` // Number of packets seen which can not be tracked + Ignore uint32 `json:"ignore"` // Packets seen 
which are already connected to an entry + Delete uint32 `json:"delete"` // Number of entries which were removed + DeleteList uint32 `json:"deleteList"` // Number of entries which were put to dying list + Insert uint32 `json:"insert"` // Number of entries inserted into the list + InsertFailed uint32 `json:"insertFailed"` // # insertion attempted but failed (same entry exists) + Drop uint32 `json:"drop"` // Number of packets dropped due to conntrack failure. + EarlyDrop uint32 `json:"earlyDrop"` // Dropped entries to make room for new ones, if maxsize reached + IcmpError uint32 `json:"icmpError"` // Subset of invalid. Packets that can't be tracked d/t error + ExpectNew uint32 `json:"expectNew"` // Entries added after an expectation was already present + ExpectCreate uint32 `json:"expectCreate"` // Expectations added + ExpectDelete uint32 `json:"expectDelete"` // Expectations deleted + SearchRestart uint32 `json:"searchRestart"` // Conntrack table lookups restarted due to hashtable resizes +} + +func NewConntrackStat(e uint32, s uint32, f uint32, n uint32, inv uint32, ign uint32, del uint32, dlst uint32, ins uint32, insfail uint32, drop uint32, edrop uint32, ie uint32, en uint32, ec uint32, ed uint32, sr uint32) *ConntrackStat { + return &ConntrackStat{ + Entries: e, + Searched: s, + Found: f, + New: n, + Invalid: inv, + Ignore: ign, + Delete: del, + DeleteList: dlst, + Insert: ins, + InsertFailed: insfail, + Drop: drop, + EarlyDrop: edrop, + IcmpError: ie, + ExpectNew: en, + ExpectCreate: ec, + ExpectDelete: ed, + SearchRestart: sr, + } +} + +type ConntrackStatList struct { + items []*ConntrackStat +} + +func NewConntrackStatList() *ConntrackStatList { + return &ConntrackStatList{ + items: []*ConntrackStat{}, + } +} + +func (l *ConntrackStatList) Append(c *ConntrackStat) { + l.items = append(l.items, c) +} + +func (l *ConntrackStatList) Items() []ConntrackStat { + items := make([]ConntrackStat, len(l.items)) + for i, el := range l.items { + items[i] = *el + } + return items +} + +// Summary returns a single-element list with totals from all list items. 
+func (l *ConntrackStatList) Summary() []ConntrackStat { + summary := NewConntrackStat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + for _, cs := range l.items { + summary.Entries += cs.Entries + summary.Searched += cs.Searched + summary.Found += cs.Found + summary.New += cs.New + summary.Invalid += cs.Invalid + summary.Ignore += cs.Ignore + summary.Delete += cs.Delete + summary.DeleteList += cs.DeleteList + summary.Insert += cs.Insert + summary.InsertFailed += cs.InsertFailed + summary.Drop += cs.Drop + summary.EarlyDrop += cs.EarlyDrop + summary.IcmpError += cs.IcmpError + summary.ExpectNew += cs.ExpectNew + summary.ExpectCreate += cs.ExpectCreate + summary.ExpectDelete += cs.ExpectDelete + summary.SearchRestart += cs.SearchRestart + } + return []ConntrackStat{*summary} +} + +func (n IOCountersStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n ConnectionStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n ProtoCountersStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (a Addr) String() string { + s, _ := json.Marshal(a) + return string(s) +} + +func (n InterfaceStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (l InterfaceStatList) String() string { + s, _ := json.Marshal(l) + return string(s) +} + +func (n InterfaceAddr) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func (n ConntrackStat) String() string { + s, _ := json.Marshal(n) + return string(s) +} + +func Interfaces() (InterfaceStatList, error) { + return InterfacesWithContext(context.Background()) +} + +func InterfacesWithContext(ctx context.Context) (InterfaceStatList, error) { + is, err := net.Interfaces() + if err != nil { + return nil, err + } + ret := make(InterfaceStatList, 0, len(is)) + for _, ifi := range is { + + var flags []string + if ifi.Flags&net.FlagUp != 0 { + flags = append(flags, "up") + } + if ifi.Flags&net.FlagBroadcast != 0 { + flags = append(flags, "broadcast") + } + if ifi.Flags&net.FlagLoopback != 0 { + flags = append(flags, "loopback") + } + if ifi.Flags&net.FlagPointToPoint != 0 { + flags = append(flags, "pointtopoint") + } + if ifi.Flags&net.FlagMulticast != 0 { + flags = append(flags, "multicast") + } + + r := InterfaceStat{ + Index: ifi.Index, + Name: ifi.Name, + MTU: ifi.MTU, + HardwareAddr: ifi.HardwareAddr.String(), + Flags: flags, + } + addrs, err := ifi.Addrs() + if err == nil { + r.Addrs = make(InterfaceAddrList, 0, len(addrs)) + for _, addr := range addrs { + r.Addrs = append(r.Addrs, InterfaceAddr{ + Addr: addr.String(), + }) + } + + } + ret = append(ret, r) + } + + return ret, nil +} + +func getIOCountersAll(n []IOCountersStat) ([]IOCountersStat, error) { + r := IOCountersStat{ + Name: "all", + } + for _, nic := range n { + r.BytesRecv += nic.BytesRecv + r.PacketsRecv += nic.PacketsRecv + r.Errin += nic.Errin + r.Dropin += nic.Dropin + r.BytesSent += nic.BytesSent + r.PacketsSent += nic.PacketsSent + r.Errout += nic.Errout + r.Dropout += nic.Dropout + } + + return []IOCountersStat{r}, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go new file mode 100644 index 0000000000000..df59abecbe1ad --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix.go @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix + +package net + +import ( + "context" + "fmt" + "regexp" + "strconv" + "strings" + "syscall" + + 
"github.com/shirou/gopsutil/v4/internal/common" +) + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +// IOCountersByFile exists just for compatibility with Linux. +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func parseNetstatNetLine(line string) (ConnectionStat, error) { + f := strings.Fields(line) + if len(f) < 5 { + return ConnectionStat{}, fmt.Errorf("wrong line,%s", line) + } + + var netType, netFamily uint32 + switch f[0] { + case "tcp", "tcp4": + netType = syscall.SOCK_STREAM + netFamily = syscall.AF_INET + case "udp", "udp4": + netType = syscall.SOCK_DGRAM + netFamily = syscall.AF_INET + case "tcp6": + netType = syscall.SOCK_STREAM + netFamily = syscall.AF_INET6 + case "udp6": + netType = syscall.SOCK_DGRAM + netFamily = syscall.AF_INET6 + default: + return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[0]) + } + + laddr, raddr, err := parseNetstatAddr(f[3], f[4], netFamily) + if err != nil { + return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s %s", f[3], f[4]) + } + + n := ConnectionStat{ + Fd: uint32(0), // not supported + Family: uint32(netFamily), + Type: uint32(netType), + Laddr: laddr, + Raddr: raddr, + Pid: int32(0), // not supported + } + if len(f) == 6 { + n.Status = f[5] + } + + return n, nil +} + +var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) + +// This function only works for netstat returning addresses with a "." +// before the port (0.0.0.0.22 instead of 0.0.0.0:22). 
+func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, raddr Addr, err error) { + parse := func(l string) (Addr, error) { + matches := portMatch.FindStringSubmatch(l) + if matches == nil { + return Addr{}, fmt.Errorf("wrong addr, %s", l) + } + host := matches[1] + port := matches[2] + if host == "*" { + switch family { + case syscall.AF_INET: + host = "0.0.0.0" + case syscall.AF_INET6: + host = "::" + default: + return Addr{}, fmt.Errorf("unknown family, %d", family) + } + } + lport, err := strconv.Atoi(port) + if err != nil { + return Addr{}, err + } + return Addr{IP: host, Port: uint32(lport)}, nil + } + + laddr, err = parse(local) + if remote != "*.*" { // remote addr exists + raddr, err = parse(remote) + if err != nil { + return laddr, raddr, err + } + } + + return laddr, raddr, err +} + +func parseNetstatUnixLine(f []string) (ConnectionStat, error) { + if len(f) < 8 { + return ConnectionStat{}, fmt.Errorf("wrong number of fields: expected >=8 got %d", len(f)) + } + + var netType uint32 + + switch f[1] { + case "dgram": + netType = syscall.SOCK_DGRAM + case "stream": + netType = syscall.SOCK_STREAM + default: + return ConnectionStat{}, fmt.Errorf("unknown type: %s", f[1]) + } + + // Some Unix Socket don't have any address associated + addr := "" + if len(f) == 9 { + addr = f[8] + } + + c := ConnectionStat{ + Fd: uint32(0), // not supported + Family: uint32(syscall.AF_UNIX), + Type: uint32(netType), + Laddr: Addr{ + IP: addr, + }, + Status: "NONE", + Pid: int32(0), // not supported + } + + return c, nil +} + +// Return true if proto is the corresponding to the kind parameter +// Only for Inet lines +func hasCorrectInetProto(kind, proto string) bool { + switch kind { + case "all", "inet": + return true + case "unix": + return false + case "inet4": + return !strings.HasSuffix(proto, "6") + case "inet6": + return strings.HasSuffix(proto, "6") + case "tcp": + return proto == "tcp" || proto == "tcp4" || proto == "tcp6" + case "tcp4": + return proto == "tcp" || proto == "tcp4" + case "tcp6": + return proto == "tcp6" + case "udp": + return proto == "udp" || proto == "udp4" || proto == "udp6" + case "udp4": + return proto == "udp" || proto == "udp4" + case "udp6": + return proto == "udp6" + } + return false +} + +func parseNetstatA(output string, kind string) ([]ConnectionStat, error) { + var ret []ConnectionStat + lines := strings.Split(string(output), "\n") + + for _, line := range lines { + fields := strings.Fields(line) + if len(fields) < 1 { + continue + } + + if strings.HasPrefix(fields[0], "f1") { + // Unix lines + if len(fields) < 2 { + // every unix connections have two lines + continue + } + + c, err := parseNetstatUnixLine(fields) + if err != nil { + return nil, fmt.Errorf("failed to parse Unix Address (%s): %s", line, err) + } + + ret = append(ret, c) + + } else if strings.HasPrefix(fields[0], "tcp") || strings.HasPrefix(fields[0], "udp") { + // Inet lines + if !hasCorrectInetProto(kind, fields[0]) { + continue + } + + // On AIX, netstat display some connections with "*.*" as local addresses + // Skip them as they aren't real connections. 
+ if fields[3] == "*.*" { + continue + } + + c, err := parseNetstatNetLine(line) + if err != nil { + return nil, fmt.Errorf("failed to parse Inet Address (%s): %s", line, err) + } + + ret = append(ret, c) + } else { + // Header lines + continue + } + } + + return ret, nil +} + +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + args := []string{"-na"} + switch strings.ToLower(kind) { + default: + fallthrough + case "": + kind = "all" + case "all": + // nothing to add + case "inet", "inet4", "inet6": + args = append(args, "-finet") + case "tcp", "tcp4", "tcp6": + args = append(args, "-finet") + case "udp", "udp4", "udp6": + args = append(args, "-finet") + case "unix": + args = append(args, "-funix") + } + + out, err := invoke.CommandWithContext(ctx, "netstat", args...) + if err != nil { + return nil, err + } + + ret, err := parseNetstatA(string(out), kind) + if err != nil { + return nil, err + } + + return ret, nil +} + +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +// Return a list of network connections opened, omitting `Uids`. +// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be +// removed from the API in the future. +func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { + return ConnectionsWithoutUidsWithContext(context.Background(), kind) +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +} + +func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +} + +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go new file mode 100644 index 0000000000000..a45a5b75cced8 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_cgo.go @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix && cgo + +package net + +import ( + "context" + + "github.com/power-devops/perfstat" +) + +func IOCountersWithContext(ctx context.Context, pernic 
bool) ([]IOCountersStat, error) { + ifs, err := perfstat.NetIfaceStat() + if err != nil { + return nil, err + } + + iocounters := make([]IOCountersStat, 0, len(ifs)) + for _, netif := range ifs { + n := IOCountersStat{ + Name: netif.Name, + BytesSent: uint64(netif.OBytes), + BytesRecv: uint64(netif.IBytes), + PacketsSent: uint64(netif.OPackets), + PacketsRecv: uint64(netif.IPackets), + Errin: uint64(netif.OErrors), + Errout: uint64(netif.IErrors), + Dropout: uint64(netif.XmitDrops), + } + iocounters = append(iocounters, n) + } + if pernic == false { + return getIOCountersAll(iocounters) + } + return iocounters, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go new file mode 100644 index 0000000000000..f63a21e73bca5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_aix_nocgo.go @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build aix && !cgo + +package net + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func parseNetstatI(output string) ([]IOCountersStat, error) { + lines := strings.Split(string(output), "\n") + ret := make([]IOCountersStat, 0, len(lines)-1) + exists := make([]string, 0, len(ret)) + + // Check first line is header + if len(lines) > 0 && strings.Fields(lines[0])[0] != "Name" { + return nil, fmt.Errorf("not a 'netstat -i' output") + } + + for _, line := range lines[1:] { + values := strings.Fields(line) + if len(values) < 1 || values[0] == "Name" { + continue + } + if common.StringsHas(exists, values[0]) { + // skip if already get + continue + } + exists = append(exists, values[0]) + + if len(values) < 9 { + continue + } + + base := 1 + // sometimes Address is omitted + if len(values) < 10 { + base = 0 + } + + parsed := make([]uint64, 0, 5) + vv := []string{ + values[base+3], // Ipkts == PacketsRecv + values[base+4], // Ierrs == Errin + values[base+5], // Opkts == PacketsSent + values[base+6], // Oerrs == Errout + values[base+8], // Drops == Dropout + } + + for _, target := range vv { + if target == "-" { + parsed = append(parsed, 0) + continue + } + + t, err := strconv.ParseUint(target, 10, 64) + if err != nil { + return nil, err + } + parsed = append(parsed, t) + } + + n := IOCountersStat{ + Name: values[0], + PacketsRecv: parsed[0], + Errin: parsed[1], + PacketsSent: parsed[2], + Errout: parsed[3], + Dropout: parsed[4], + } + ret = append(ret, n) + } + return ret, nil +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + out, err := invoke.CommandWithContext(ctx, "netstat", "-idn") + if err != nil { + return nil, err + } + + iocounters, err := parseNetstatI(string(out)) + if err != nil { + return nil, err + } + if pernic == false { + return getIOCountersAll(iocounters) + } + return iocounters, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go new file mode 100644 index 0000000000000..f86b7bf9e3b8e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_darwin.go @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package net + +import ( + "context" + "errors" + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +var ( + errNetstatHeader = errors.New("Can't parse header of netstat output") + netstatLinkRegexp = regexp.MustCompile(`^$`) +) + +const endOfLine = "\n" + +func 
parseNetstatLine(line string) (stat *IOCountersStat, linkID *uint, err error) { + var ( + numericValue uint64 + columns = strings.Fields(line) + ) + + if columns[0] == "Name" { + err = errNetstatHeader + return + } + + // try to extract the numeric value from + if subMatch := netstatLinkRegexp.FindStringSubmatch(columns[2]); len(subMatch) == 2 { + numericValue, err = strconv.ParseUint(subMatch[1], 10, 64) + if err != nil { + return + } + linkIDUint := uint(numericValue) + linkID = &linkIDUint + } + + base := 1 + numberColumns := len(columns) + // sometimes Address is omitted + if numberColumns < 12 { + base = 0 + } + if numberColumns < 11 || numberColumns > 13 { + err = fmt.Errorf("Line %q do have an invalid number of columns %d", line, numberColumns) + return + } + + parsed := make([]uint64, 0, 7) + vv := []string{ + columns[base+3], // Ipkts == PacketsRecv + columns[base+4], // Ierrs == Errin + columns[base+5], // Ibytes == BytesRecv + columns[base+6], // Opkts == PacketsSent + columns[base+7], // Oerrs == Errout + columns[base+8], // Obytes == BytesSent + } + if len(columns) == 12 { + vv = append(vv, columns[base+10]) + } + + for _, target := range vv { + if target == "-" { + parsed = append(parsed, 0) + continue + } + + if numericValue, err = strconv.ParseUint(target, 10, 64); err != nil { + return + } + parsed = append(parsed, numericValue) + } + + stat = &IOCountersStat{ + Name: strings.Trim(columns[0], "*"), // remove the * that sometimes is on right on interface + PacketsRecv: parsed[0], + Errin: parsed[1], + BytesRecv: parsed[2], + PacketsSent: parsed[3], + Errout: parsed[4], + BytesSent: parsed[5], + } + if len(parsed) == 7 { + stat.Dropout = parsed[6] + } + return +} + +type netstatInterface struct { + linkID *uint + stat *IOCountersStat +} + +func parseNetstatOutput(output string) ([]netstatInterface, error) { + var ( + err error + lines = strings.Split(strings.Trim(output, endOfLine), endOfLine) + ) + + // number of interfaces is number of lines less one for the header + numberInterfaces := len(lines) - 1 + + interfaces := make([]netstatInterface, numberInterfaces) + // no output beside header + if numberInterfaces == 0 { + return interfaces, nil + } + + for index := 0; index < numberInterfaces; index++ { + nsIface := netstatInterface{} + if nsIface.stat, nsIface.linkID, err = parseNetstatLine(lines[index+1]); err != nil { + return nil, err + } + interfaces[index] = nsIface + } + return interfaces, nil +} + +// map that hold the name of a network interface and the number of usage +type mapInterfaceNameUsage map[string]uint + +func newMapInterfaceNameUsage(ifaces []netstatInterface) mapInterfaceNameUsage { + output := make(mapInterfaceNameUsage) + for index := range ifaces { + if ifaces[index].linkID != nil { + ifaceName := ifaces[index].stat.Name + usage, ok := output[ifaceName] + if ok { + output[ifaceName] = usage + 1 + } else { + output[ifaceName] = 1 + } + } + } + return output +} + +func (min mapInterfaceNameUsage) isTruncated() bool { + for _, usage := range min { + if usage > 1 { + return true + } + } + return false +} + +func (min mapInterfaceNameUsage) notTruncated() []string { + output := make([]string, 0) + for ifaceName, usage := range min { + if usage == 1 { + output = append(output, ifaceName) + } + } + return output +} + +// example of `netstat -ibdnW` output on yosemite +// Name Mtu Network Address Ipkts Ierrs Ibytes Opkts Oerrs Obytes Coll Drop +// lo0 16384 869107 0 169411755 869107 0 169411755 0 0 +// lo0 16384 ::1/128 ::1 869107 - 169411755 869107 - 
169411755 - - +// lo0 16384 127 127.0.0.1 869107 - 169411755 869107 - 169411755 - - +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + var ( + ret []IOCountersStat + retIndex int + ) + + netstat, err := exec.LookPath("netstat") + if err != nil { + return nil, err + } + + // try to get all interface metrics, and hope there won't be any truncated + out, err := invoke.CommandWithContext(ctx, netstat, "-ibdnW") + if err != nil { + return nil, err + } + + nsInterfaces, err := parseNetstatOutput(string(out)) + if err != nil { + return nil, err + } + + ifaceUsage := newMapInterfaceNameUsage(nsInterfaces) + notTruncated := ifaceUsage.notTruncated() + ret = make([]IOCountersStat, len(notTruncated)) + + if !ifaceUsage.isTruncated() { + // no truncated interface name, return stats of all interface with + for index := range nsInterfaces { + if nsInterfaces[index].linkID != nil { + ret[retIndex] = *nsInterfaces[index].stat + retIndex++ + } + } + } else { + // duplicated interface, list all interfaces + if out, err = invoke.CommandWithContext(ctx, "ifconfig", "-l"); err != nil { + return nil, err + } + interfaceNames := strings.Fields(strings.TrimRight(string(out), endOfLine)) + + // for each of the interface name, run netstat if we don't have any stats yet + for _, interfaceName := range interfaceNames { + truncated := true + for index := range nsInterfaces { + if nsInterfaces[index].linkID != nil && nsInterfaces[index].stat.Name == interfaceName { + // handle the non truncated name to avoid execute netstat for them again + ret[retIndex] = *nsInterfaces[index].stat + retIndex++ + truncated = false + break + } + } + if truncated { + // run netstat with -I$ifacename + if out, err = invoke.CommandWithContext(ctx, netstat, "-ibdnWI"+interfaceName); err != nil { + return nil, err + } + parsedIfaces, err := parseNetstatOutput(string(out)) + if err != nil { + return nil, err + } + if len(parsedIfaces) == 0 { + // interface had been removed since `ifconfig -l` had been executed + continue + } + for index := range parsedIfaces { + if parsedIfaces[index].linkID != nil { + ret = append(ret, *parsedIfaces[index].stat) + break + } + } + } + } + } + + if pernic == false { + return getIOCountersAll(ret) + } + return ret, nil +} + +// IOCountersByFile exists just for compatibility with Linux. +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersWithContext(ctx, pernic) +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +// ProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. 
+// Not Implemented for Darwin +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go new file mode 100644 index 0000000000000..e62deeeed3616 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_fallback.go @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build !aix && !darwin && !linux && !freebsd && !openbsd && !windows && !solaris + +package net + +import ( + "context" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + return []IOCountersStat{}, common.ErrNotImplementedError +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return []FilterStat{}, common.ErrNotImplementedError +} + +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return []ProtoCountersStat{}, common.ErrNotImplementedError +} + +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +// Return a list of network connections opened, omitting `Uids`. +// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be +// removed from the API in the future. 
+func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { + return ConnectionsWithoutUidsWithContext(context.Background(), kind) +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +} + +func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +} + +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go new file mode 100644 index 0000000000000..155a49c404522 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_freebsd.go @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build freebsd + +package net + +import ( + "context" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + out, err := invoke.CommandWithContext(ctx, "netstat", "-ibdnW") + if err != nil { + return nil, err + } + + lines := strings.Split(string(out), "\n") + ret := make([]IOCountersStat, 0, len(lines)-1) + exists := make([]string, 0, len(ret)) + + for _, line := range lines { + values := strings.Fields(line) + if len(values) < 1 || values[0] == "Name" { + continue + } + if common.StringsHas(exists, values[0]) { + // skip if already get + continue + } + exists = append(exists, values[0]) + + if len(values) < 12 { + continue + } + base := 1 + // sometimes Address is omitted + if len(values) < 13 { + base = 0 + } + + parsed := make([]uint64, 0, 8) + vv := []string{ + values[base+3], // PacketsRecv + values[base+4], // Errin + values[base+5], // Dropin + values[base+6], // BytesRecvn + values[base+7], // PacketSent + values[base+8], // Errout + values[base+9], // BytesSent + values[base+11], // Dropout + } + for _, target := range vv { + if target == "-" { + parsed = append(parsed, 0) + continue + } + + t, err := strconv.ParseUint(target, 10, 64) + if err != nil { + return nil, err + } + parsed = append(parsed, t) + } + + n := IOCountersStat{ + Name: values[0], + PacketsRecv: parsed[0], + Errin: parsed[1], + Dropin: parsed[2], + BytesRecv: parsed[3], + PacketsSent: parsed[4], + Errout: parsed[5], + BytesSent: parsed[6], + Dropout: parsed[7], + } + ret = append(ret, n) + } + + if pernic == false { + return getIOCountersAll(ret) 
+ } + + return ret, nil +} + +// IOCountersByFile exists just for compatibility with Linux. +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +// ProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. +// Not Implemented for FreeBSD +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go new file mode 100644 index 0000000000000..6db04b6279bf9 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_linux.go @@ -0,0 +1,910 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux + +package net + +import ( + "bytes" + "context" + "encoding/hex" + "errors" + "fmt" + "io" + "net" + "os" + "strconv" + "strings" + "syscall" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +const ( // Conntrack Column numbers + ctENTRIES = iota + ctSEARCHED + ctFOUND + ctNEW + ctINVALID + ctIGNORE + ctDELETE + ctDELETE_LIST + ctINSERT + ctINSERT_FAILED + ctDROP + ctEARLY_DROP + ctICMP_ERROR + CT_EXPEctNEW + ctEXPECT_CREATE + CT_EXPEctDELETE + ctSEARCH_RESTART +) + +// NetIOCounters returns network I/O statistics for every network +// interface installed on the system. If pernic argument is false, +// return only sum of all information (which name is 'all'). If true, +// every network interface installed on the system is returned +// separately. 
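+//
+// Minimal usage sketch (illustrative, not part of the upstream source; assumes
+// the caller has the usual fmt import):
+//
+//	counters, err := IOCounters(true) // one IOCountersStat per interface
+//	if err != nil {
+//		return err
+//	}
+//	for _, c := range counters {
+//		fmt.Printf("%s rx=%d tx=%d\n", c.Name, c.BytesRecv, c.BytesSent)
+//	}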
+func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + filename := common.HostProcWithContext(ctx, "net/dev") + return IOCountersByFileWithContext(ctx, pernic, filename) +} + +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + + parts := make([]string, 2) + + statlen := len(lines) - 1 + + ret := make([]IOCountersStat, 0, statlen) + + for _, line := range lines[2:] { + separatorPos := strings.LastIndex(line, ":") + if separatorPos == -1 { + continue + } + parts[0] = line[0:separatorPos] + parts[1] = line[separatorPos+1:] + + interfaceName := strings.TrimSpace(parts[0]) + if interfaceName == "" { + continue + } + + fields := strings.Fields(strings.TrimSpace(parts[1])) + bytesRecv, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return ret, err + } + packetsRecv, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return ret, err + } + errIn, err := strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return ret, err + } + dropIn, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return ret, err + } + fifoIn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return ret, err + } + bytesSent, err := strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return ret, err + } + packetsSent, err := strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return ret, err + } + errOut, err := strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return ret, err + } + dropOut, err := strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return ret, err + } + fifoOut, err := strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return ret, err + } + + nic := IOCountersStat{ + Name: interfaceName, + BytesRecv: bytesRecv, + PacketsRecv: packetsRecv, + Errin: errIn, + Dropin: dropIn, + Fifoin: fifoIn, + BytesSent: bytesSent, + PacketsSent: packetsSent, + Errout: errOut, + Dropout: dropOut, + Fifoout: fifoOut, + } + ret = append(ret, nic) + } + + if !pernic { + return getIOCountersAll(ret) + } + + return ret, nil +} + +var netProtocols = []string{ + "ip", + "icmp", + "icmpmsg", + "tcp", + "udp", + "udplite", +} + +// ProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. 
+// Available protocols: +// [ip,icmp,icmpmsg,tcp,udp,udplite] +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + if len(protocols) == 0 { + protocols = netProtocols + } + + stats := make([]ProtoCountersStat, 0, len(protocols)) + protos := make(map[string]bool, len(protocols)) + for _, p := range protocols { + protos[p] = true + } + + filename := common.HostProcWithContext(ctx, "net/snmp") + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + + linecount := len(lines) + for i := 0; i < linecount; i++ { + line := lines[i] + r := strings.IndexRune(line, ':') + if r == -1 { + return nil, errors.New(filename + " is not formatted correctly, expected ':'.") + } + proto := strings.ToLower(line[:r]) + if !protos[proto] { + // skip protocol and data line + i++ + continue + } + + // Read header line + statNames := strings.Split(line[r+2:], " ") + + // Read data line + i++ + statValues := strings.Split(lines[i][r+2:], " ") + if len(statNames) != len(statValues) { + return nil, errors.New(filename + " is not formatted correctly, expected same number of columns.") + } + stat := ProtoCountersStat{ + Protocol: proto, + Stats: make(map[string]int64, len(statNames)), + } + for j := range statNames { + value, err := strconv.ParseInt(statValues[j], 10, 64) + if err != nil { + return nil, err + } + stat.Stats[statNames[j]] = value + } + stats = append(stats, stat) + } + return stats, nil +} + +// NetFilterCounters returns iptables conntrack statistics +// the currently in use conntrack count and the max. +// If the file does not exist or is invalid it will return nil. 
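+//
+// Illustrative sketch (editorial, not upstream code); on Linux the values come
+// from sys/net/netfilter/nf_conntrack_count and nf_conntrack_max under the
+// configured host /proc root:
+//
+//	stats, err := FilterCounters()
+//	if err == nil && len(stats) == 1 {
+//		fmt.Println(stats[0].ConnTrackCount, stats[0].ConnTrackMax)
+//	}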
+func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + countfile := common.HostProcWithContext(ctx, "sys/net/netfilter/nf_conntrack_count") + maxfile := common.HostProcWithContext(ctx, "sys/net/netfilter/nf_conntrack_max") + + count, err := common.ReadInts(countfile) + if err != nil { + return nil, err + } + stats := make([]FilterStat, 0, 1) + + max, err := common.ReadInts(maxfile) + if err != nil { + return nil, err + } + + payload := FilterStat{ + ConnTrackCount: count[0], + ConnTrackMax: max[0], + } + + stats = append(stats, payload) + return stats, nil +} + +// ConntrackStats returns more detailed info about the conntrack table +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +// ConntrackStatsWithContext returns more detailed info about the conntrack table +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return conntrackStatsFromFile(common.HostProcWithContext(ctx, "net/stat/nf_conntrack"), percpu) +} + +// conntrackStatsFromFile returns more detailed info about the conntrack table +// from `filename` +// If 'percpu' is false, the result will contain exactly one item with totals/summary +func conntrackStatsFromFile(filename string, percpu bool) ([]ConntrackStat, error) { + lines, err := common.ReadLines(filename) + if err != nil { + return nil, err + } + + statlist := NewConntrackStatList() + + for _, line := range lines { + fields := strings.Fields(line) + if len(fields) == 17 && fields[0] != "entries" { + statlist.Append(NewConntrackStat( + common.HexToUint32(fields[ctENTRIES]), + common.HexToUint32(fields[ctSEARCHED]), + common.HexToUint32(fields[ctFOUND]), + common.HexToUint32(fields[ctNEW]), + common.HexToUint32(fields[ctINVALID]), + common.HexToUint32(fields[ctIGNORE]), + common.HexToUint32(fields[ctDELETE]), + common.HexToUint32(fields[ctDELETE_LIST]), + common.HexToUint32(fields[ctINSERT]), + common.HexToUint32(fields[ctINSERT_FAILED]), + common.HexToUint32(fields[ctDROP]), + common.HexToUint32(fields[ctEARLY_DROP]), + common.HexToUint32(fields[ctICMP_ERROR]), + common.HexToUint32(fields[CT_EXPEctNEW]), + common.HexToUint32(fields[ctEXPECT_CREATE]), + common.HexToUint32(fields[CT_EXPEctDELETE]), + common.HexToUint32(fields[ctSEARCH_RESTART]), + )) + } + } + + if percpu { + return statlist.Items(), nil + } + return statlist.Summary(), nil +} + +// http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h +var tcpStatuses = map[string]string{ + "01": "ESTABLISHED", + "02": "SYN_SENT", + "03": "SYN_RECV", + "04": "FIN_WAIT1", + "05": "FIN_WAIT2", + "06": "TIME_WAIT", + "07": "CLOSE", + "08": "CLOSE_WAIT", + "09": "LAST_ACK", + "0A": "LISTEN", + "0B": "CLOSING", +} + +type netConnectionKindType struct { + family uint32 + sockType uint32 + filename string +} + +var kindTCP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_STREAM, + filename: "tcp", +} + +var kindTCP6 = netConnectionKindType{ + family: syscall.AF_INET6, + sockType: syscall.SOCK_STREAM, + filename: "tcp6", +} + +var kindUDP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_DGRAM, + filename: "udp", +} + +var kindUDP6 = netConnectionKindType{ + family: syscall.AF_INET6, + sockType: syscall.SOCK_DGRAM, + filename: "udp6", +} + +var kindUNIX = netConnectionKindType{ + family: syscall.AF_UNIX, + 
filename: "unix", +} + +var netConnectionKindMap = map[string][]netConnectionKindType{ + "all": {kindTCP4, kindTCP6, kindUDP4, kindUDP6, kindUNIX}, + "tcp": {kindTCP4, kindTCP6}, + "tcp4": {kindTCP4}, + "tcp6": {kindTCP6}, + "udp": {kindUDP4, kindUDP6}, + "udp4": {kindUDP4}, + "udp6": {kindUDP6}, + "unix": {kindUNIX}, + "inet": {kindTCP4, kindTCP6, kindUDP4, kindUDP6}, + "inet4": {kindTCP4, kindUDP4}, + "inet6": {kindTCP6, kindUDP6}, +} + +type inodeMap struct { + pid int32 + fd uint32 +} + +type connTmp struct { + fd uint32 + family uint32 + sockType uint32 + laddr Addr + raddr Addr + status string + pid int32 + boundPid int32 + path string +} + +// Return a list of network connections opened. +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(ctx, kind, 0) +} + +// Return a list of network connections opened returning at most `max` +// connections for each running process. +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, 0, max) +} + +// Return a list of network connections opened, omitting `Uids`. +// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be +// removed from the API in the future. +func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { + return ConnectionsWithoutUidsWithContext(context.Background(), kind) +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +} + +// Return a list of network connections opened by a process. +func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +// Return up to `max` network connections opened by a process. 
+func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, false) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max, true) +} + +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int, skipUids bool) ([]ConnectionStat, error) { + tmap, ok := netConnectionKindMap[kind] + if !ok { + return nil, fmt.Errorf("invalid kind, %s", kind) + } + root := common.HostProcWithContext(ctx) + var err error + var inodes map[string][]inodeMap + if pid == 0 { + inodes, err = getProcInodesAllWithContext(ctx, root, max) + } else { + inodes, err = getProcInodes(root, pid, max) + if len(inodes) == 0 { + // no connection for the pid + return []ConnectionStat{}, nil + } + } + if err != nil { + return nil, fmt.Errorf("cound not get pid(s), %d: %w", pid, err) + } + return statsFromInodesWithContext(ctx, root, pid, tmap, inodes, skipUids) +} + +func statsFromInodes(root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) { + return statsFromInodesWithContext(context.Background(), root, pid, tmap, inodes, skipUids) +} + +func statsFromInodesWithContext(ctx context.Context, root string, pid int32, tmap []netConnectionKindType, inodes map[string][]inodeMap, skipUids bool) ([]ConnectionStat, error) { + dupCheckMap := make(map[string]struct{}) + var ret []ConnectionStat + + var err error + for _, t := range tmap { + var path string + var connKey string + var ls []connTmp + if pid == 0 { + path = fmt.Sprintf("%s/net/%s", root, t.filename) + } else { + path = fmt.Sprintf("%s/%d/net/%s", root, pid, t.filename) + } + switch t.family { + case syscall.AF_INET, syscall.AF_INET6: + ls, err = processInetWithContext(ctx, path, t, inodes, pid) + case syscall.AF_UNIX: + ls, err = processUnix(path, t, inodes, pid) + } + if err != nil { + return nil, err + } + for _, c := range ls { + // Build TCP key to id the connection uniquely + // socket type, src ip, src port, dst ip, dst port and state should be enough + // to prevent duplications. + connKey = fmt.Sprintf("%d-%s:%d-%s:%d-%s", c.sockType, c.laddr.IP, c.laddr.Port, c.raddr.IP, c.raddr.Port, c.status) + if _, ok := dupCheckMap[connKey]; ok { + continue + } + + conn := ConnectionStat{ + Fd: c.fd, + Family: c.family, + Type: c.sockType, + Laddr: c.laddr, + Raddr: c.raddr, + Status: c.status, + Pid: c.pid, + } + if c.pid == 0 { + conn.Pid = c.boundPid + } else { + conn.Pid = c.pid + } + + if !skipUids { + // fetch process owner Real, effective, saved set, and filesystem UIDs + proc := process{Pid: conn.Pid} + conn.Uids, _ = proc.getUids(ctx) + } + + ret = append(ret, conn) + dupCheckMap[connKey] = struct{}{} + } + + } + + return ret, nil +} + +// getProcInodes returns fd of the pid. 
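+// The returned map is keyed by socket inode (the NNN in a "socket:[NNN]"
+// symlink under <root>/<pid>/fd) and lists the pid/fd pairs referencing that
+// inode; max bounds how many fd entries are read (values <= 0 mean no limit).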
+func getProcInodes(root string, pid int32, max int) (map[string][]inodeMap, error) { + ret := make(map[string][]inodeMap) + + dir := fmt.Sprintf("%s/%d/fd", root, pid) + f, err := os.Open(dir) + if err != nil { + return ret, err + } + defer f.Close() + dirEntries, err := f.ReadDir(max) + if err != nil { + return ret, err + } + for _, dirEntry := range dirEntries { + inodePath := fmt.Sprintf("%s/%d/fd/%s", root, pid, dirEntry.Name()) + + inode, err := os.Readlink(inodePath) + if err != nil { + continue + } + if !strings.HasPrefix(inode, "socket:[") { + continue + } + // the process is using a socket + l := len(inode) + inode = inode[8 : l-1] + _, ok := ret[inode] + if !ok { + ret[inode] = make([]inodeMap, 0) + } + fd, err := strconv.Atoi(dirEntry.Name()) + if err != nil { + continue + } + + i := inodeMap{ + pid: pid, + fd: uint32(fd), + } + ret[inode] = append(ret[inode], i) + } + return ret, nil +} + +// Pids retunres all pids. +// Note: this is a copy of process_linux.Pids() +// FIXME: Import process occures import cycle. +// move to common made other platform breaking. Need consider. +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + + d, err := os.Open(common.HostProcWithContext(ctx)) + if err != nil { + return nil, err + } + defer d.Close() + + fnames, err := d.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, fname := range fnames { + pid, err := strconv.ParseInt(fname, 10, 32) + if err != nil { + // if not numeric name, just skip + continue + } + ret = append(ret, int32(pid)) + } + + return ret, nil +} + +// Note: the following is based off process_linux structs and methods +// we need these to fetch the owner of a process ID +// FIXME: Import process occures import cycle. 
+// see remarks on pids() +type process struct { + Pid int32 `json:"pid"` + uids []int32 +} + +// Uids returns user ids of the process as a slice of the int +func (p *process) getUids(ctx context.Context) ([]int32, error) { + err := p.fillFromStatus(ctx) + if err != nil { + return []int32{}, err + } + return p.uids, nil +} + +// Get status from /proc/(pid)/status +func (p *process) fillFromStatus(ctx context.Context) error { + pid := p.Pid + statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "status") + contents, err := os.ReadFile(statPath) + if err != nil { + return err + } + lines := strings.Split(string(contents), "\n") + for _, line := range lines { + tabParts := strings.SplitN(line, "\t", 2) + if len(tabParts) < 2 { + continue + } + value := tabParts[1] + switch strings.TrimRight(tabParts[0], ":") { + case "Uid": + p.uids = make([]int32, 0, 4) + for _, i := range strings.Split(value, "\t") { + v, err := strconv.ParseInt(i, 10, 32) + if err != nil { + return err + } + p.uids = append(p.uids, int32(v)) + } + } + } + return nil +} + +func getProcInodesAll(root string, max int) (map[string][]inodeMap, error) { + return getProcInodesAllWithContext(context.Background(), root, max) +} + +func getProcInodesAllWithContext(ctx context.Context, root string, max int) (map[string][]inodeMap, error) { + pids, err := PidsWithContext(ctx) + if err != nil { + return nil, err + } + ret := make(map[string][]inodeMap) + + for _, pid := range pids { + t, err := getProcInodes(root, pid, max) + if err != nil { + // skip if permission error or no longer exists + if os.IsPermission(err) || os.IsNotExist(err) || errors.Is(err, io.EOF) { + continue + } + return ret, err + } + if len(t) == 0 { + continue + } + // TODO: update ret. + ret = updateMap(ret, t) + } + return ret, nil +} + +// decodeAddress decode addresse represents addr in proc/net/* +// ex: +// "0500000A:0016" -> "10.0.0.5", 22 +// "0085002452100113070057A13F025401:0035" -> "2400:8500:1301:1052:a157:7:154:23f", 53 +func decodeAddress(family uint32, src string) (Addr, error) { + return decodeAddressWithContext(context.Background(), family, src) +} + +func decodeAddressWithContext(ctx context.Context, family uint32, src string) (Addr, error) { + t := strings.Split(src, ":") + if len(t) != 2 { + return Addr{}, fmt.Errorf("does not contain port, %s", src) + } + addr := t[0] + port, err := strconv.ParseUint(t[1], 16, 16) + if err != nil { + return Addr{}, fmt.Errorf("invalid port, %s", src) + } + decoded, err := hex.DecodeString(addr) + if err != nil { + return Addr{}, fmt.Errorf("decode error, %w", err) + } + var ip net.IP + + if family == syscall.AF_INET { + if common.IsLittleEndian() { + ip = net.IP(ReverseWithContext(ctx, decoded)) + } else { + ip = net.IP(decoded) + } + } else { // IPv6 + ip, err = parseIPv6HexStringWithContext(ctx, decoded) + if err != nil { + return Addr{}, err + } + } + return Addr{ + IP: ip.String(), + Port: uint32(port), + }, nil +} + +// Reverse reverses array of bytes. 
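+// Editorial note: the slice is reversed in place and also returned, which is
+// how the little-endian /proc address bytes are flipped, e.g.
+//
+//	Reverse([]byte{0x05, 0x00, 0x00, 0x0a}) // -> []byte{0x0a, 0x00, 0x00, 0x05}, i.e. 10.0.0.5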
+func Reverse(s []byte) []byte { + return ReverseWithContext(context.Background(), s) +} + +func ReverseWithContext(ctx context.Context, s []byte) []byte { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } + return s +} + +// parseIPv6HexString parse array of bytes to IPv6 string +func parseIPv6HexString(src []byte) (net.IP, error) { + return parseIPv6HexStringWithContext(context.Background(), src) +} + +func parseIPv6HexStringWithContext(ctx context.Context, src []byte) (net.IP, error) { + if len(src) != 16 { + return nil, fmt.Errorf("invalid IPv6 string") + } + + buf := make([]byte, 0, 16) + for i := 0; i < len(src); i += 4 { + r := ReverseWithContext(ctx, src[i:i+4]) + buf = append(buf, r...) + } + return net.IP(buf), nil +} + +func processInet(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { + return processInetWithContext(context.Background(), file, kind, inodes, filterPid) +} + +func processInetWithContext(ctx context.Context, file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { + if strings.HasSuffix(file, "6") && !common.PathExists(file) { + // IPv6 not supported, return empty. + return []connTmp{}, nil + } + + // Read the contents of the /proc file with a single read sys call. + // This minimizes duplicates in the returned connections + // For more info: + // https://github.com/shirou/gopsutil/pull/361 + contents, err := os.ReadFile(file) + if err != nil { + return nil, err + } + + lines := bytes.Split(contents, []byte("\n")) + + var ret []connTmp + // skip first line + for _, line := range lines[1:] { + l := strings.Fields(string(line)) + if len(l) < 10 { + continue + } + laddr := l[1] + raddr := l[2] + status := l[3] + inode := l[9] + pid := int32(0) + fd := uint32(0) + i, exists := inodes[inode] + if exists { + pid = i[0].pid + fd = i[0].fd + } + if filterPid > 0 && filterPid != pid { + continue + } + if kind.sockType == syscall.SOCK_STREAM { + status = tcpStatuses[status] + } else { + status = "NONE" + } + la, err := decodeAddressWithContext(ctx, kind.family, laddr) + if err != nil { + continue + } + ra, err := decodeAddressWithContext(ctx, kind.family, raddr) + if err != nil { + continue + } + + ret = append(ret, connTmp{ + fd: fd, + family: kind.family, + sockType: kind.sockType, + laddr: la, + raddr: ra, + status: status, + pid: pid, + }) + } + + return ret, nil +} + +func processUnix(file string, kind netConnectionKindType, inodes map[string][]inodeMap, filterPid int32) ([]connTmp, error) { + // Read the contents of the /proc file with a single read sys call. 
+ // This minimizes duplicates in the returned connections + // For more info: + // https://github.com/shirou/gopsutil/pull/361 + contents, err := os.ReadFile(file) + if err != nil { + return nil, err + } + + lines := bytes.Split(contents, []byte("\n")) + + var ret []connTmp + // skip first line + for _, line := range lines[1:] { + tokens := strings.Fields(string(line)) + if len(tokens) < 6 { + continue + } + st, err := strconv.Atoi(tokens[4]) + if err != nil { + return nil, err + } + + inode := tokens[6] + + var pairs []inodeMap + pairs, exists := inodes[inode] + if !exists { + pairs = []inodeMap{ + {}, + } + } + for _, pair := range pairs { + if filterPid > 0 && filterPid != pair.pid { + continue + } + var path string + if len(tokens) == 8 { + path = tokens[len(tokens)-1] + } + ret = append(ret, connTmp{ + fd: pair.fd, + family: kind.family, + sockType: uint32(st), + laddr: Addr{ + IP: path, + }, + pid: pair.pid, + status: "NONE", + path: path, + }) + } + } + + return ret, nil +} + +func updateMap(src map[string][]inodeMap, add map[string][]inodeMap) map[string][]inodeMap { + for key, value := range add { + a, exists := src[key] + if !exists { + src[key] = value + continue + } + src[key] = append(a, value...) + } + return src +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go new file mode 100644 index 0000000000000..50e37fe4029ce --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_openbsd.go @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd + +package net + +import ( + "context" + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + "syscall" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +var portMatch = regexp.MustCompile(`(.*)\.(\d+)$`) + +func ParseNetstat(output string, mode string, + iocs map[string]IOCountersStat, +) error { + lines := strings.Split(output, "\n") + + exists := make([]string, 0, len(lines)-1) + + columns := 6 + if mode == "ind" { + columns = 10 + } + for _, line := range lines { + values := strings.Fields(line) + if len(values) < 1 || values[0] == "Name" { + continue + } + if common.StringsHas(exists, values[0]) { + // skip if already get + continue + } + + if len(values) < columns { + continue + } + base := 1 + // sometimes Address is omitted + if len(values) < columns { + base = 0 + } + + parsed := make([]uint64, 0, 8) + var vv []string + if mode == "inb" { + vv = []string{ + values[base+3], // BytesRecv + values[base+4], // BytesSent + } + } else { + vv = []string{ + values[base+3], // Ipkts + values[base+4], // Ierrs + values[base+5], // Opkts + values[base+6], // Oerrs + values[base+8], // Drops + } + } + for _, target := range vv { + if target == "-" { + parsed = append(parsed, 0) + continue + } + + t, err := strconv.ParseUint(target, 10, 64) + if err != nil { + return err + } + parsed = append(parsed, t) + } + exists = append(exists, values[0]) + + n, present := iocs[values[0]] + if !present { + n = IOCountersStat{Name: values[0]} + } + if mode == "inb" { + n.BytesRecv = parsed[0] + n.BytesSent = parsed[1] + } else { + n.PacketsRecv = parsed[0] + n.Errin = parsed[1] + n.PacketsSent = parsed[2] + n.Errout = parsed[3] + n.Dropin = parsed[4] + n.Dropout = parsed[4] + } + + iocs[n.Name] = n + } + return nil +} + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + 
netstat, err := exec.LookPath("netstat") + if err != nil { + return nil, err + } + out, err := invoke.CommandWithContext(ctx, netstat, "-inb") + if err != nil { + return nil, err + } + out2, err := invoke.CommandWithContext(ctx, netstat, "-ind") + if err != nil { + return nil, err + } + iocs := make(map[string]IOCountersStat) + + lines := strings.Split(string(out), "\n") + ret := make([]IOCountersStat, 0, len(lines)-1) + + err = ParseNetstat(string(out), "inb", iocs) + if err != nil { + return nil, err + } + err = ParseNetstat(string(out2), "ind", iocs) + if err != nil { + return nil, err + } + + for _, ioc := range iocs { + ret = append(ret, ioc) + } + + if pernic == false { + return getIOCountersAll(ret) + } + + return ret, nil +} + +// IOCountersByFile exists just for compatibility with Linux. +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +// ProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. 
+// Not Implemented for OpenBSD +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func parseNetstatLine(line string) (ConnectionStat, error) { + f := strings.Fields(line) + if len(f) < 5 { + return ConnectionStat{}, fmt.Errorf("wrong line,%s", line) + } + + var netType, netFamily uint32 + switch f[0] { + case "tcp": + netType = syscall.SOCK_STREAM + netFamily = syscall.AF_INET + case "udp": + netType = syscall.SOCK_DGRAM + netFamily = syscall.AF_INET + case "tcp6": + netType = syscall.SOCK_STREAM + netFamily = syscall.AF_INET6 + case "udp6": + netType = syscall.SOCK_DGRAM + netFamily = syscall.AF_INET6 + default: + return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[0]) + } + + laddr, raddr, err := parseNetstatAddr(f[3], f[4], netFamily) + if err != nil { + return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s %s", f[3], f[4]) + } + + n := ConnectionStat{ + Fd: uint32(0), // not supported + Family: uint32(netFamily), + Type: uint32(netType), + Laddr: laddr, + Raddr: raddr, + Pid: int32(0), // not supported + } + if len(f) == 6 { + n.Status = f[5] + } + + return n, nil +} + +func parseNetstatAddr(local string, remote string, family uint32) (laddr Addr, raddr Addr, err error) { + parse := func(l string) (Addr, error) { + matches := portMatch.FindStringSubmatch(l) + if matches == nil { + return Addr{}, fmt.Errorf("wrong addr, %s", l) + } + host := matches[1] + port := matches[2] + if host == "*" { + switch family { + case syscall.AF_INET: + host = "0.0.0.0" + case syscall.AF_INET6: + host = "::" + default: + return Addr{}, fmt.Errorf("unknown family, %d", family) + } + } + lport, err := strconv.Atoi(port) + if err != nil { + return Addr{}, err + } + return Addr{IP: host, Port: uint32(lport)}, nil + } + + laddr, err = parse(local) + if remote != "*.*" { // remote addr exists + raddr, err = parse(remote) + if err != nil { + return laddr, raddr, err + } + } + + return laddr, raddr, err +} + +// Return a list of network connections opened. +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + var ret []ConnectionStat + + args := []string{"-na"} + switch strings.ToLower(kind) { + default: + fallthrough + case "": + fallthrough + case "all": + fallthrough + case "inet": + // nothing to add + case "inet4": + args = append(args, "-finet") + case "inet6": + args = append(args, "-finet6") + case "tcp": + args = append(args, "-ptcp") + case "tcp4": + args = append(args, "-ptcp", "-finet") + case "tcp6": + args = append(args, "-ptcp", "-finet6") + case "udp": + args = append(args, "-pudp") + case "udp4": + args = append(args, "-pudp", "-finet") + case "udp6": + args = append(args, "-pudp", "-finet6") + case "unix": + return ret, common.ErrNotImplementedError + } + + netstat, err := exec.LookPath("netstat") + if err != nil { + return nil, err + } + out, err := invoke.CommandWithContext(ctx, netstat, args...) 
+ if err != nil { + return nil, err + } + lines := strings.Split(string(out), "\n") + for _, line := range lines { + if !(strings.HasPrefix(line, "tcp") || strings.HasPrefix(line, "udp")) { + continue + } + n, err := parseNetstatLine(line) + if err != nil { + continue + } + + ret = append(ret, n) + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go new file mode 100644 index 0000000000000..b886066e82e5d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_solaris.go @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build solaris + +package net + +import ( + "context" + "fmt" + "regexp" + "runtime" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// NetIOCounters returnes network I/O statistics for every network +// interface installed on the system. If pernic argument is false, +// return only sum of all information (which name is 'all'). If true, +// every network interface installed on the system is returned +// separately. +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +var kstatSplit = regexp.MustCompile(`[:\s]+`) + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + // collect all the net class's links with below statistics + filterstr := "/^(?!vnic)/::phys:/^rbytes64$|^ipackets64$|^idrops64$|^ierrors$|^obytes64$|^opackets64$|^odrops64$|^oerrors$/" + if runtime.GOOS == "illumos" { + filterstr = "/[^vnic]/::mac:/^rbytes64$|^ipackets64$|^idrops64$|^ierrors$|^obytes64$|^opackets64$|^odrops64$|^oerrors$/" + } + kstatSysOut, err := invoke.CommandWithContext(ctx, "kstat", "-c", "net", "-p", filterstr) + if err != nil { + return nil, fmt.Errorf("cannot execute kstat: %w", err) + } + + lines := strings.Split(strings.TrimSpace(string(kstatSysOut)), "\n") + if len(lines) == 0 { + return nil, fmt.Errorf("no interface found") + } + rbytes64arr := make(map[string]uint64) + ipackets64arr := make(map[string]uint64) + idrops64arr := make(map[string]uint64) + ierrorsarr := make(map[string]uint64) + obytes64arr := make(map[string]uint64) + opackets64arr := make(map[string]uint64) + odrops64arr := make(map[string]uint64) + oerrorsarr := make(map[string]uint64) + + for _, line := range lines { + fields := kstatSplit.Split(line, -1) + interfaceName := fields[0] + instance := fields[1] + switch fields[3] { + case "rbytes64": + rbytes64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse rbytes64: %w", err) + } + case "ipackets64": + ipackets64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse ipackets64: %w", err) + } + case "idrops64": + idrops64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse idrops64: %w", err) + } + case "ierrors": + ierrorsarr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse ierrors: %w", err) + } + case "obytes64": + obytes64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse obytes64: %w", err) + } + case "opackets64": + opackets64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return 
nil, fmt.Errorf("cannot parse opackets64: %w", err) + } + case "odrops64": + odrops64arr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse odrops64: %w", err) + } + case "oerrors": + oerrorsarr[interfaceName+instance], err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, fmt.Errorf("cannot parse oerrors: %w", err) + } + } + } + ret := make([]IOCountersStat, 0) + for k := range rbytes64arr { + nic := IOCountersStat{ + Name: k, + BytesRecv: rbytes64arr[k], + PacketsRecv: ipackets64arr[k], + Errin: ierrorsarr[k], + Dropin: idrops64arr[k], + BytesSent: obytes64arr[k], + PacketsSent: opackets64arr[k], + Errout: oerrorsarr[k], + Dropout: odrops64arr[k], + } + ret = append(ret, nic) + } + + if !pernic { + return getIOCountersAll(ret) + } + + return ret, nil +} + +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return []FilterStat{}, common.ErrNotImplementedError +} + +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return []ProtoCountersStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go new file mode 100644 index 0000000000000..71fc3b972a298 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_unix.go @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build freebsd || darwin + +package net + +import ( + "context" + "fmt" + "net" + "strconv" + "strings" + "syscall" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +// Return a list of network connections opened. +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(ctx, kind, 0) +} + +// Return a list of network connections opened returning at most `max` +// connections for each running process. +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +// Return a list of network connections opened by a process. 
+func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + var ret []ConnectionStat + + args := []string{"-i"} + switch strings.ToLower(kind) { + default: + fallthrough + case "": + fallthrough + case "all": + fallthrough + case "inet": + args = append(args, "tcp", "-i", "udp") + case "inet4": + args = append(args, "4") + case "inet6": + args = append(args, "6") + case "tcp": + args = append(args, "tcp") + case "tcp4": + args = append(args, "4tcp") + case "tcp6": + args = append(args, "6tcp") + case "udp": + args = append(args, "udp") + case "udp4": + args = append(args, "4udp") + case "udp6": + args = append(args, "6udp") + case "unix": + args = []string{"-U"} + } + + r, err := common.CallLsofWithContext(ctx, invoke, pid, args...) + if err != nil { + return nil, err + } + for _, rr := range r { + if strings.HasPrefix(rr, "COMMAND") { + continue + } + n, err := parseNetLine(rr) + if err != nil { + continue + } + + ret = append(ret, n) + } + + return ret, nil +} + +var constMap = map[string]int{ + "unix": syscall.AF_UNIX, + "TCP": syscall.SOCK_STREAM, + "UDP": syscall.SOCK_DGRAM, + "IPv4": syscall.AF_INET, + "IPv6": syscall.AF_INET6, +} + +func parseNetLine(line string) (ConnectionStat, error) { + f := strings.Fields(line) + if len(f) < 8 { + return ConnectionStat{}, fmt.Errorf("wrong line,%s", line) + } + + if len(f) == 8 { + f = append(f, f[7]) + f[7] = "unix" + } + + pid, err := strconv.Atoi(f[1]) + if err != nil { + return ConnectionStat{}, err + } + fd, err := strconv.Atoi(strings.Trim(f[3], "u")) + if err != nil { + return ConnectionStat{}, fmt.Errorf("unknown fd, %s", f[3]) + } + netFamily, ok := constMap[f[4]] + if !ok { + return ConnectionStat{}, fmt.Errorf("unknown family, %s", f[4]) + } + netType, ok := constMap[f[7]] + if !ok { + return ConnectionStat{}, fmt.Errorf("unknown type, %s", f[7]) + } + + var laddr, raddr Addr + if f[7] == "unix" { + laddr.IP = f[8] + } else { + laddr, raddr, err = parseNetAddr(f[8]) + if err != nil { + return ConnectionStat{}, fmt.Errorf("failed to parse netaddr, %s", f[8]) + } + } + + n := ConnectionStat{ + Fd: uint32(fd), + Family: uint32(netFamily), + Type: uint32(netType), + Laddr: laddr, + Raddr: raddr, + Pid: int32(pid), + } + if len(f) == 10 { + n.Status = strings.Trim(f[9], "()") + } + + return n, nil +} + +func parseNetAddr(line string) (laddr Addr, raddr Addr, err error) { + parse := func(l string) (Addr, error) { + host, port, err := net.SplitHostPort(l) + if err != nil { + return Addr{}, fmt.Errorf("wrong addr, %s", l) + } + lport, err := strconv.Atoi(port) + if err != nil { + return Addr{}, err + } + return Addr{IP: host, Port: uint32(lport)}, nil + } + + addrs := strings.Split(line, "->") + if len(addrs) == 0 { + return laddr, raddr, fmt.Errorf("wrong netaddr, %s", line) + } + laddr, err = parse(addrs[0]) + if len(addrs) == 2 { // remote addr exists + raddr, err = parse(addrs[1]) + if err != nil { + return laddr, raddr, err + } + } + + return laddr, raddr, err +} + +// Return up to `max` network connections opened by a process. 
+func ConnectionsPidMax(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +// Return a list of network connections opened, omitting `Uids`. +// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be +// removed from the API in the future. +func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { + return ConnectionsWithoutUidsWithContext(context.Background(), kind) +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +} + +func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +} + +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go new file mode 100644 index 0000000000000..12f62cda05bc5 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/net/net_windows.go @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package net + +import ( + "context" + "fmt" + "net" + "os" + "syscall" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" + "golang.org/x/sys/windows" +) + +var ( + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + procGetExtendedTCPTable = modiphlpapi.NewProc("GetExtendedTcpTable") + procGetExtendedUDPTable = modiphlpapi.NewProc("GetExtendedUdpTable") + procGetIfEntry2 = modiphlpapi.NewProc("GetIfEntry2") +) + +const ( + TCPTableBasicListener = iota + TCPTableBasicConnections + TCPTableBasicAll + TCPTableOwnerPIDListener + TCPTableOwnerPIDConnections + TCPTableOwnerPIDAll + TCPTableOwnerModuleListener + TCPTableOwnerModuleConnections + TCPTableOwnerModuleAll +) + +type netConnectionKindType struct { + family uint32 + sockType uint32 + filename string +} + +var kindTCP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_STREAM, + filename: "tcp", +} + +var kindTCP6 = netConnectionKindType{ + family: syscall.AF_INET6, + sockType: syscall.SOCK_STREAM, + filename: "tcp6", +} + +var kindUDP4 = netConnectionKindType{ + family: syscall.AF_INET, + sockType: syscall.SOCK_DGRAM, + filename: "udp", +} + +var kindUDP6 = netConnectionKindType{ + 
family: syscall.AF_INET6, + sockType: syscall.SOCK_DGRAM, + filename: "udp6", +} + +var netConnectionKindMap = map[string][]netConnectionKindType{ + "all": {kindTCP4, kindTCP6, kindUDP4, kindUDP6}, + "tcp": {kindTCP4, kindTCP6}, + "tcp4": {kindTCP4}, + "tcp6": {kindTCP6}, + "udp": {kindUDP4, kindUDP6}, + "udp4": {kindUDP4}, + "udp6": {kindUDP6}, + "inet": {kindTCP4, kindTCP6, kindUDP4, kindUDP6}, + "inet4": {kindTCP4, kindUDP4}, + "inet6": {kindTCP6, kindUDP6}, +} + +// https://github.com/microsoft/ethr/blob/aecdaf923970e5a9b4c461b4e2e3963d781ad2cc/plt_windows.go#L114-L170 +type guid struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +const ( + maxStringSize = 256 + maxPhysAddressLength = 32 + pad0for64_4for32 = 0 +) + +type mibIfRow2 struct { + InterfaceLuid uint64 + InterfaceIndex uint32 + InterfaceGuid guid + Alias [maxStringSize + 1]uint16 + Description [maxStringSize + 1]uint16 + PhysicalAddressLength uint32 + PhysicalAddress [maxPhysAddressLength]uint8 + PermanentPhysicalAddress [maxPhysAddressLength]uint8 + Mtu uint32 + Type uint32 + TunnelType uint32 + MediaType uint32 + PhysicalMediumType uint32 + AccessType uint32 + DirectionType uint32 + InterfaceAndOperStatusFlags uint32 + OperStatus uint32 + AdminStatus uint32 + MediaConnectState uint32 + NetworkGuid guid + ConnectionType uint32 + padding1 [pad0for64_4for32]byte + TransmitLinkSpeed uint64 + ReceiveLinkSpeed uint64 + InOctets uint64 + InUcastPkts uint64 + InNUcastPkts uint64 + InDiscards uint64 + InErrors uint64 + InUnknownProtos uint64 + InUcastOctets uint64 + InMulticastOctets uint64 + InBroadcastOctets uint64 + OutOctets uint64 + OutUcastPkts uint64 + OutNUcastPkts uint64 + OutDiscards uint64 + OutErrors uint64 + OutUcastOctets uint64 + OutMulticastOctets uint64 + OutBroadcastOctets uint64 + OutQLen uint64 +} + +func IOCounters(pernic bool) ([]IOCountersStat, error) { + return IOCountersWithContext(context.Background(), pernic) +} + +func IOCountersWithContext(ctx context.Context, pernic bool) ([]IOCountersStat, error) { + ifs, err := net.Interfaces() + if err != nil { + return nil, err + } + var counters []IOCountersStat + + err = procGetIfEntry2.Find() + if err == nil { // Vista+, uint64 values (issue#693) + for _, ifi := range ifs { + c := IOCountersStat{ + Name: ifi.Name, + } + + row := mibIfRow2{InterfaceIndex: uint32(ifi.Index)} + ret, _, err := procGetIfEntry2.Call(uintptr(unsafe.Pointer(&row))) + if ret != 0 { + return nil, os.NewSyscallError("GetIfEntry2", err) + } + c.BytesSent = uint64(row.OutOctets) + c.BytesRecv = uint64(row.InOctets) + c.PacketsSent = uint64(row.OutUcastPkts) + c.PacketsRecv = uint64(row.InUcastPkts) + c.Errin = uint64(row.InErrors) + c.Errout = uint64(row.OutErrors) + c.Dropin = uint64(row.InDiscards) + c.Dropout = uint64(row.OutDiscards) + + counters = append(counters, c) + } + } else { // WinXP fallback, uint32 values + for _, ifi := range ifs { + c := IOCountersStat{ + Name: ifi.Name, + } + + row := windows.MibIfRow{Index: uint32(ifi.Index)} + err = windows.GetIfEntry(&row) + if err != nil { + return nil, os.NewSyscallError("GetIfEntry", err) + } + c.BytesSent = uint64(row.OutOctets) + c.BytesRecv = uint64(row.InOctets) + c.PacketsSent = uint64(row.OutUcastPkts) + c.PacketsRecv = uint64(row.InUcastPkts) + c.Errin = uint64(row.InErrors) + c.Errout = uint64(row.OutErrors) + c.Dropin = uint64(row.InDiscards) + c.Dropout = uint64(row.OutDiscards) + + counters = append(counters, c) + } + } + + if !pernic { + return getIOCountersAll(counters) + } + return counters, nil +} 
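+
+// Illustrative usage sketch (editorial, from a caller's perspective; it assumes
+// the vendored import path "github.com/shirou/gopsutil/v4/net" added by this
+// patch and only exercises IOCounters as defined above):
+//
+//	counters, err := net.IOCounters(true) // one IOCountersStat per interface
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, c := range counters {
+//		fmt.Printf("%s: rx=%d B, tx=%d B\n", c.Name, c.BytesRecv, c.BytesSent)
+//	}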
+ +// IOCountersByFile exists just for compatibility with Linux. +func IOCountersByFile(pernic bool, filename string) ([]IOCountersStat, error) { + return IOCountersByFileWithContext(context.Background(), pernic, filename) +} + +func IOCountersByFileWithContext(ctx context.Context, pernic bool, filename string) ([]IOCountersStat, error) { + return IOCounters(pernic) +} + +// Return a list of network connections +// Available kind: +// +// reference to netConnectionKindMap +func Connections(kind string) ([]ConnectionStat, error) { + return ConnectionsWithContext(context.Background(), kind) +} + +func ConnectionsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(ctx, kind, 0) +} + +// ConnectionsPid Return a list of network connections opened by a process +func ConnectionsPid(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + tmap, ok := netConnectionKindMap[kind] + if !ok { + return nil, fmt.Errorf("invalid kind, %s", kind) + } + return getProcInet(tmap, pid) +} + +func getProcInet(kinds []netConnectionKindType, pid int32) ([]ConnectionStat, error) { + stats := make([]ConnectionStat, 0) + + for _, kind := range kinds { + s, err := getNetStatWithKind(kind) + if err != nil { + continue + } + + if pid == 0 { + stats = append(stats, s...) + } else { + for _, ns := range s { + if ns.Pid != pid { + continue + } + stats = append(stats, ns) + } + } + } + + return stats, nil +} + +func getNetStatWithKind(kindType netConnectionKindType) ([]ConnectionStat, error) { + if kindType.filename == "" { + return nil, fmt.Errorf("kind filename must be required") + } + + switch kindType.filename { + case kindTCP4.filename: + return getTCPConnections(kindTCP4.family) + case kindTCP6.filename: + return getTCPConnections(kindTCP6.family) + case kindUDP4.filename: + return getUDPConnections(kindUDP4.family) + case kindUDP6.filename: + return getUDPConnections(kindUDP6.family) + } + + return nil, fmt.Errorf("invalid kind filename, %s", kindType.filename) +} + +// Return a list of network connections opened returning at most `max` +// connections for each running process. +func ConnectionsMax(kind string, max int) ([]ConnectionStat, error) { + return ConnectionsMaxWithContext(context.Background(), kind, max) +} + +func ConnectionsMaxWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +// Return a list of network connections opened, omitting `Uids`. +// WithoutUids functions are reliant on implementation details. They may be altered to be an alias for Connections or be +// removed from the API in the future. 
+func ConnectionsWithoutUids(kind string) ([]ConnectionStat, error) { + return ConnectionsWithoutUidsWithContext(context.Background(), kind) +} + +func ConnectionsWithoutUidsWithContext(ctx context.Context, kind string) ([]ConnectionStat, error) { + return ConnectionsMaxWithoutUidsWithContext(ctx, kind, 0) +} + +func ConnectionsMaxWithoutUidsWithContext(ctx context.Context, kind string, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, 0, max) +} + +func ConnectionsPidWithoutUids(kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidWithoutUidsWithContext(context.Background(), kind, pid) +} + +func ConnectionsPidWithoutUidsWithContext(ctx context.Context, kind string, pid int32) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, 0) +} + +func ConnectionsPidMaxWithoutUids(kind string, pid int32, max int) ([]ConnectionStat, error) { + return ConnectionsPidMaxWithoutUidsWithContext(context.Background(), kind, pid, max) +} + +func ConnectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return connectionsPidMaxWithoutUidsWithContext(ctx, kind, pid, max) +} + +func connectionsPidMaxWithoutUidsWithContext(ctx context.Context, kind string, pid int32, max int) ([]ConnectionStat, error) { + return []ConnectionStat{}, common.ErrNotImplementedError +} + +func FilterCounters() ([]FilterStat, error) { + return FilterCountersWithContext(context.Background()) +} + +func FilterCountersWithContext(ctx context.Context) ([]FilterStat, error) { + return nil, common.ErrNotImplementedError +} + +func ConntrackStats(percpu bool) ([]ConntrackStat, error) { + return ConntrackStatsWithContext(context.Background(), percpu) +} + +func ConntrackStatsWithContext(ctx context.Context, percpu bool) ([]ConntrackStat, error) { + return nil, common.ErrNotImplementedError +} + +// ProtoCounters returns network statistics for the entire system +// If protocols is empty then all protocols are returned, otherwise +// just the protocols in the list are returned. 
+// Not Implemented for Windows +func ProtoCounters(protocols []string) ([]ProtoCountersStat, error) { + return ProtoCountersWithContext(context.Background(), protocols) +} + +func ProtoCountersWithContext(ctx context.Context, protocols []string) ([]ProtoCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func getTableUintptr(family uint32, buf []byte) uintptr { + var ( + pmibTCPTable pmibTCPTableOwnerPidAll + pmibTCP6Table pmibTCP6TableOwnerPidAll + + p uintptr + ) + switch family { + case kindTCP4.family: + if len(buf) > 0 { + pmibTCPTable = (*mibTCPTableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } else { + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } + case kindTCP6.family: + if len(buf) > 0 { + pmibTCP6Table = (*mibTCP6TableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } else { + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } + } + return p +} + +func getTableInfo(filename string, table interface{}) (index, step, length int) { + switch filename { + case kindTCP4.filename: + index = int(unsafe.Sizeof(table.(pmibTCPTableOwnerPidAll).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibTCPTableOwnerPidAll).Table)) + length = int(table.(pmibTCPTableOwnerPidAll).DwNumEntries) + case kindTCP6.filename: + index = int(unsafe.Sizeof(table.(pmibTCP6TableOwnerPidAll).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibTCP6TableOwnerPidAll).Table)) + length = int(table.(pmibTCP6TableOwnerPidAll).DwNumEntries) + case kindUDP4.filename: + index = int(unsafe.Sizeof(table.(pmibUDPTableOwnerPid).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibUDPTableOwnerPid).Table)) + length = int(table.(pmibUDPTableOwnerPid).DwNumEntries) + case kindUDP6.filename: + index = int(unsafe.Sizeof(table.(pmibUDP6TableOwnerPid).DwNumEntries)) + step = int(unsafe.Sizeof(table.(pmibUDP6TableOwnerPid).Table)) + length = int(table.(pmibUDP6TableOwnerPid).DwNumEntries) + } + + return +} + +func getTCPConnections(family uint32) ([]ConnectionStat, error) { + var ( + p uintptr + buf []byte + size uint32 + + pmibTCPTable pmibTCPTableOwnerPidAll + pmibTCP6Table pmibTCP6TableOwnerPidAll + ) + + if family == 0 { + return nil, fmt.Errorf("faimly must be required") + } + + for { + switch family { + case kindTCP4.family: + if len(buf) > 0 { + pmibTCPTable = (*mibTCPTableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } else { + p = uintptr(unsafe.Pointer(pmibTCPTable)) + } + case kindTCP6.family: + if len(buf) > 0 { + pmibTCP6Table = (*mibTCP6TableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } else { + p = uintptr(unsafe.Pointer(pmibTCP6Table)) + } + } + + err := getExtendedTcpTable(p, + &size, + true, + family, + tcpTableOwnerPidAll, + 0) + if err == nil { + break + } + if err != windows.ERROR_INSUFFICIENT_BUFFER { + return nil, err + } + buf = make([]byte, size) + } + + var ( + index, step int + length int + ) + + stats := make([]ConnectionStat, 0) + switch family { + case kindTCP4.family: + index, step, length = getTableInfo(kindTCP4.filename, pmibTCPTable) + case kindTCP6.family: + index, step, length = getTableInfo(kindTCP6.filename, pmibTCP6Table) + } + + if length == 0 { + return nil, nil + } + + for i := 0; i < length; i++ { + switch family { + case kindTCP4.family: + mibs := (*mibTCPRowOwnerPid)(unsafe.Pointer(&buf[index])) + ns := mibs.convertToConnectionStat() + stats = append(stats, ns) + case kindTCP6.family: + mibs := 
(*mibTCP6RowOwnerPid)(unsafe.Pointer(&buf[index])) + ns := mibs.convertToConnectionStat() + stats = append(stats, ns) + } + + index += step + } + return stats, nil +} + +func getUDPConnections(family uint32) ([]ConnectionStat, error) { + var ( + p uintptr + buf []byte + size uint32 + + pmibUDPTable pmibUDPTableOwnerPid + pmibUDP6Table pmibUDP6TableOwnerPid + ) + + if family == 0 { + return nil, fmt.Errorf("faimly must be required") + } + + for { + switch family { + case kindUDP4.family: + if len(buf) > 0 { + pmibUDPTable = (*mibUDPTableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibUDPTable)) + } else { + p = uintptr(unsafe.Pointer(pmibUDPTable)) + } + case kindUDP6.family: + if len(buf) > 0 { + pmibUDP6Table = (*mibUDP6TableOwnerPid)(unsafe.Pointer(&buf[0])) + p = uintptr(unsafe.Pointer(pmibUDP6Table)) + } else { + p = uintptr(unsafe.Pointer(pmibUDP6Table)) + } + } + + err := getExtendedUdpTable( + p, + &size, + true, + family, + udpTableOwnerPid, + 0, + ) + if err == nil { + break + } + if err != windows.ERROR_INSUFFICIENT_BUFFER { + return nil, err + } + buf = make([]byte, size) + } + + var index, step, length int + + stats := make([]ConnectionStat, 0) + switch family { + case kindUDP4.family: + index, step, length = getTableInfo(kindUDP4.filename, pmibUDPTable) + case kindUDP6.family: + index, step, length = getTableInfo(kindUDP6.filename, pmibUDP6Table) + } + + if length == 0 { + return nil, nil + } + + for i := 0; i < length; i++ { + switch family { + case kindUDP4.family: + mibs := (*mibUDPRowOwnerPid)(unsafe.Pointer(&buf[index])) + ns := mibs.convertToConnectionStat() + stats = append(stats, ns) + case kindUDP6.family: + mibs := (*mibUDP6RowOwnerPid)(unsafe.Pointer(&buf[index])) + ns := mibs.convertToConnectionStat() + stats = append(stats, ns) + } + + index += step + } + return stats, nil +} + +// tcpStatuses https://msdn.microsoft.com/en-us/library/windows/desktop/bb485761(v=vs.85).aspx +var tcpStatuses = map[mibTCPState]string{ + 1: "CLOSED", + 2: "LISTEN", + 3: "SYN_SENT", + 4: "SYN_RECEIVED", + 5: "ESTABLISHED", + 6: "FIN_WAIT_1", + 7: "FIN_WAIT_2", + 8: "CLOSE_WAIT", + 9: "CLOSING", + 10: "LAST_ACK", + 11: "TIME_WAIT", + 12: "DELETE", +} + +func getExtendedTcpTable(pTcpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass tcpTableClass, reserved uint32) (errcode error) { + r1, _, _ := syscall.Syscall6(procGetExtendedTCPTable.Addr(), 6, pTcpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved)) + if r1 != 0 { + errcode = syscall.Errno(r1) + } + return +} + +func getExtendedUdpTable(pUdpTable uintptr, pdwSize *uint32, bOrder bool, ulAf uint32, tableClass udpTableClass, reserved uint32) (errcode error) { + r1, _, _ := syscall.Syscall6(procGetExtendedUDPTable.Addr(), 6, pUdpTable, uintptr(unsafe.Pointer(pdwSize)), getUintptrFromBool(bOrder), uintptr(ulAf), uintptr(tableClass), uintptr(reserved)) + if r1 != 0 { + errcode = syscall.Errno(r1) + } + return +} + +func getUintptrFromBool(b bool) uintptr { + if b { + return 1 + } + return 0 +} + +const anySize = 1 + +// type MIB_TCP_STATE int32 +type mibTCPState int32 + +type tcpTableClass int32 + +const ( + tcpTableBasicListener tcpTableClass = iota + tcpTableBasicConnections + tcpTableBasicAll + tcpTableOwnerPidListener + tcpTableOwnerPidConnections + tcpTableOwnerPidAll + tcpTableOwnerModuleListener + tcpTableOwnerModuleConnections + tcpTableOwnerModuleAll +) + +type udpTableClass int32 + +const ( + udpTableBasic udpTableClass = 
iota + udpTableOwnerPid + udpTableOwnerModule +) + +// TCP + +type mibTCPRowOwnerPid struct { + DwState uint32 + DwLocalAddr uint32 + DwLocalPort uint32 + DwRemoteAddr uint32 + DwRemotePort uint32 + DwOwningPid uint32 +} + +func (m *mibTCPRowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindTCP4.family, + Type: kindTCP4.sockType, + Laddr: Addr{ + IP: parseIPv4HexString(m.DwLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Raddr: Addr{ + IP: parseIPv4HexString(m.DwRemoteAddr), + Port: uint32(decodePort(m.DwRemotePort)), + }, + Pid: int32(m.DwOwningPid), + Status: tcpStatuses[mibTCPState(m.DwState)], + } + + return ns +} + +type mibTCPTableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibTCPRowOwnerPid +} + +type mibTCP6RowOwnerPid struct { + UcLocalAddr [16]byte + DwLocalScopeId uint32 + DwLocalPort uint32 + UcRemoteAddr [16]byte + DwRemoteScopeId uint32 + DwRemotePort uint32 + DwState uint32 + DwOwningPid uint32 +} + +func (m *mibTCP6RowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindTCP6.family, + Type: kindTCP6.sockType, + Laddr: Addr{ + IP: parseIPv6HexString(m.UcLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Raddr: Addr{ + IP: parseIPv6HexString(m.UcRemoteAddr), + Port: uint32(decodePort(m.DwRemotePort)), + }, + Pid: int32(m.DwOwningPid), + Status: tcpStatuses[mibTCPState(m.DwState)], + } + + return ns +} + +type mibTCP6TableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibTCP6RowOwnerPid +} + +type ( + pmibTCPTableOwnerPidAll *mibTCPTableOwnerPid + pmibTCP6TableOwnerPidAll *mibTCP6TableOwnerPid +) + +// UDP + +type mibUDPRowOwnerPid struct { + DwLocalAddr uint32 + DwLocalPort uint32 + DwOwningPid uint32 +} + +func (m *mibUDPRowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindUDP4.family, + Type: kindUDP4.sockType, + Laddr: Addr{ + IP: parseIPv4HexString(m.DwLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Pid: int32(m.DwOwningPid), + } + + return ns +} + +type mibUDPTableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibUDPRowOwnerPid +} + +type mibUDP6RowOwnerPid struct { + UcLocalAddr [16]byte + DwLocalScopeId uint32 + DwLocalPort uint32 + DwOwningPid uint32 +} + +func (m *mibUDP6RowOwnerPid) convertToConnectionStat() ConnectionStat { + ns := ConnectionStat{ + Family: kindUDP6.family, + Type: kindUDP6.sockType, + Laddr: Addr{ + IP: parseIPv6HexString(m.UcLocalAddr), + Port: uint32(decodePort(m.DwLocalPort)), + }, + Pid: int32(m.DwOwningPid), + } + + return ns +} + +type mibUDP6TableOwnerPid struct { + DwNumEntries uint32 + Table [anySize]mibUDP6RowOwnerPid +} + +type ( + pmibUDPTableOwnerPid *mibUDPTableOwnerPid + pmibUDP6TableOwnerPid *mibUDP6TableOwnerPid +) + +func decodePort(port uint32) uint16 { + return syscall.Ntohs(uint16(port)) +} + +func parseIPv4HexString(addr uint32) string { + return fmt.Sprintf("%d.%d.%d.%d", addr&255, addr>>8&255, addr>>16&255, addr>>24&255) +} + +func parseIPv6HexString(addr [16]byte) string { + var ret [16]byte + for i := 0; i < 16; i++ { + ret[i] = uint8(addr[i]) + } + + // convert []byte to net.IP + ip := net.IP(ret[:]) + return ip.String() +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process.go b/vendor/github.com/shirou/gopsutil/v4/process/process.go new file mode 100644 index 0000000000000..4082fc95a2c63 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process.go @@ -0,0 +1,628 @@ +// SPDX-License-Identifier: 
BSD-3-Clause +package process + +import ( + "context" + "encoding/json" + "errors" + "runtime" + "sort" + "sync" + "time" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/mem" + "github.com/shirou/gopsutil/v4/net" +) + +var ( + invoke common.Invoker = common.Invoke{} + ErrorNoChildren = errors.New("process does not have children") + ErrorProcessNotRunning = errors.New("process does not exist") + ErrorNotPermitted = errors.New("operation not permitted") +) + +type Process struct { + Pid int32 `json:"pid"` + name string + status string + parent int32 + parentMutex sync.RWMutex // for windows ppid cache + numCtxSwitches *NumCtxSwitchesStat + uids []uint32 + gids []uint32 + groups []uint32 + numThreads int32 + memInfo *MemoryInfoStat + sigInfo *SignalInfoStat + createTime int64 + + lastCPUTimes *cpu.TimesStat + lastCPUTime time.Time + + tgid int32 +} + +// Process status +const ( + // Running marks a task a running or runnable (on the run queue) + Running = "running" + // Blocked marks a task waiting on a short, uninterruptible operation (usually I/O) + Blocked = "blocked" + // Idle marks a task sleeping for more than about 20 seconds + Idle = "idle" + // Lock marks a task waiting to acquire a lock + Lock = "lock" + // Sleep marks task waiting for short, interruptible operation + Sleep = "sleep" + // Stop marks a stopped process + Stop = "stop" + // Wait marks an idle interrupt thread (or paging in pre 2.6.xx Linux) + Wait = "wait" + // Zombie marks a defunct process, terminated but not reaped by its parent + Zombie = "zombie" + + // Solaris states. See https://github.com/collectd/collectd/blob/1da3305c10c8ff9a63081284cf3d4bb0f6daffd8/src/processes.c#L2115 + Daemon = "daemon" + Detached = "detached" + System = "system" + Orphan = "orphan" + + UnknownState = "" +) + +type OpenFilesStat struct { + Path string `json:"path"` + Fd uint64 `json:"fd"` +} + +type MemoryInfoStat struct { + RSS uint64 `json:"rss"` // bytes + VMS uint64 `json:"vms"` // bytes + HWM uint64 `json:"hwm"` // bytes + Data uint64 `json:"data"` // bytes + Stack uint64 `json:"stack"` // bytes + Locked uint64 `json:"locked"` // bytes + Swap uint64 `json:"swap"` // bytes +} + +type SignalInfoStat struct { + PendingProcess uint64 `json:"pending_process"` + PendingThread uint64 `json:"pending_thread"` + Blocked uint64 `json:"blocked"` + Ignored uint64 `json:"ignored"` + Caught uint64 `json:"caught"` +} + +type RlimitStat struct { + Resource int32 `json:"resource"` + Soft uint64 `json:"soft"` + Hard uint64 `json:"hard"` + Used uint64 `json:"used"` +} + +type IOCountersStat struct { + ReadCount uint64 `json:"readCount"` + WriteCount uint64 `json:"writeCount"` + ReadBytes uint64 `json:"readBytes"` + WriteBytes uint64 `json:"writeBytes"` +} + +type NumCtxSwitchesStat struct { + Voluntary int64 `json:"voluntary"` + Involuntary int64 `json:"involuntary"` +} + +type PageFaultsStat struct { + MinorFaults uint64 `json:"minorFaults"` + MajorFaults uint64 `json:"majorFaults"` + ChildMinorFaults uint64 `json:"childMinorFaults"` + ChildMajorFaults uint64 `json:"childMajorFaults"` +} + +// Resource limit constants are from /usr/include/x86_64-linux-gnu/bits/resource.h +// from libc6-dev package in Ubuntu 16.10 +const ( + RLIMIT_CPU int32 = 0 + RLIMIT_FSIZE int32 = 1 + RLIMIT_DATA int32 = 2 + RLIMIT_STACK int32 = 3 + RLIMIT_CORE int32 = 4 + RLIMIT_RSS int32 = 5 + RLIMIT_NPROC int32 = 6 + RLIMIT_NOFILE int32 = 7 + RLIMIT_MEMLOCK int32 = 8 + RLIMIT_AS int32 = 9 + RLIMIT_LOCKS 
int32 = 10 + RLIMIT_SIGPENDING int32 = 11 + RLIMIT_MSGQUEUE int32 = 12 + RLIMIT_NICE int32 = 13 + RLIMIT_RTPRIO int32 = 14 + RLIMIT_RTTIME int32 = 15 +) + +func (p Process) String() string { + s, _ := json.Marshal(p) + return string(s) +} + +func (o OpenFilesStat) String() string { + s, _ := json.Marshal(o) + return string(s) +} + +func (m MemoryInfoStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +func (r RlimitStat) String() string { + s, _ := json.Marshal(r) + return string(s) +} + +func (i IOCountersStat) String() string { + s, _ := json.Marshal(i) + return string(s) +} + +func (p NumCtxSwitchesStat) String() string { + s, _ := json.Marshal(p) + return string(s) +} + +var enableBootTimeCache bool + +// EnableBootTimeCache change cache behavior of BootTime. If true, cache BootTime value. Default is false. +func EnableBootTimeCache(enable bool) { + enableBootTimeCache = enable +} + +// Pids returns a slice of process ID list which are running now. +func Pids() ([]int32, error) { + return PidsWithContext(context.Background()) +} + +func PidsWithContext(ctx context.Context) ([]int32, error) { + pids, err := pidsWithContext(ctx) + sort.Slice(pids, func(i, j int) bool { return pids[i] < pids[j] }) + return pids, err +} + +// Processes returns a slice of pointers to Process structs for all +// currently running processes. +func Processes() ([]*Process, error) { + return ProcessesWithContext(context.Background()) +} + +// NewProcess creates a new Process instance, it only stores the pid and +// checks that the process exists. Other method on Process can be used +// to get more information about the process. An error will be returned +// if the process does not exist. +func NewProcess(pid int32) (*Process, error) { + return NewProcessWithContext(context.Background(), pid) +} + +func NewProcessWithContext(ctx context.Context, pid int32) (*Process, error) { + p := &Process{ + Pid: pid, + } + + exists, err := PidExistsWithContext(ctx, pid) + if err != nil { + return p, err + } + if !exists { + return p, ErrorProcessNotRunning + } + p.CreateTimeWithContext(ctx) + return p, nil +} + +func PidExists(pid int32) (bool, error) { + return PidExistsWithContext(context.Background(), pid) +} + +// Background returns true if the process is in background, false otherwise. +func (p *Process) Background() (bool, error) { + return p.BackgroundWithContext(context.Background()) +} + +func (p *Process) BackgroundWithContext(ctx context.Context) (bool, error) { + fg, err := p.ForegroundWithContext(ctx) + if err != nil { + return false, err + } + return !fg, err +} + +// If interval is 0, return difference from last call(non-blocking). +// If interval > 0, wait interval sec and return difference between start and end. 
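+// For example (illustrative sketch from a caller's perspective; NewProcess and
+// Percent are the functions defined in this file, everything else is standard
+// library or a placeholder):
+//
+//	p, err := process.NewProcess(int32(os.Getpid()))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	pct, err := p.Percent(time.Second) // samples CPU times over ~1s
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("cpu: %.1f%%\n", pct)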
+func (p *Process) Percent(interval time.Duration) (float64, error) { + return p.PercentWithContext(context.Background(), interval) +} + +func (p *Process) PercentWithContext(ctx context.Context, interval time.Duration) (float64, error) { + cpuTimes, err := p.TimesWithContext(ctx) + if err != nil { + return 0, err + } + now := time.Now() + + if interval > 0 { + p.lastCPUTimes = cpuTimes + p.lastCPUTime = now + if err := common.Sleep(ctx, interval); err != nil { + return 0, err + } + cpuTimes, err = p.TimesWithContext(ctx) + now = time.Now() + if err != nil { + return 0, err + } + } else { + if p.lastCPUTimes == nil { + // invoked first time + p.lastCPUTimes = cpuTimes + p.lastCPUTime = now + return 0, nil + } + } + + numcpu := runtime.NumCPU() + delta := (now.Sub(p.lastCPUTime).Seconds()) * float64(numcpu) + ret := calculatePercent(p.lastCPUTimes, cpuTimes, delta, numcpu) + p.lastCPUTimes = cpuTimes + p.lastCPUTime = now + return ret, nil +} + +// IsRunning returns whether the process is still running or not. +func (p *Process) IsRunning() (bool, error) { + return p.IsRunningWithContext(context.Background()) +} + +func (p *Process) IsRunningWithContext(ctx context.Context) (bool, error) { + createTime, err := p.CreateTimeWithContext(ctx) + if err != nil { + return false, err + } + p2, err := NewProcessWithContext(ctx, p.Pid) + if errors.Is(err, ErrorProcessNotRunning) { + return false, nil + } + createTime2, err := p2.CreateTimeWithContext(ctx) + if err != nil { + return false, err + } + return createTime == createTime2, nil +} + +// CreateTime returns created time of the process in milliseconds since the epoch, in UTC. +func (p *Process) CreateTime() (int64, error) { + return p.CreateTimeWithContext(context.Background()) +} + +func (p *Process) CreateTimeWithContext(ctx context.Context) (int64, error) { + if p.createTime != 0 { + return p.createTime, nil + } + createTime, err := p.createTimeWithContext(ctx) + p.createTime = createTime + return p.createTime, err +} + +func calculatePercent(t1, t2 *cpu.TimesStat, delta float64, numcpu int) float64 { + if delta == 0 { + return 0 + } + delta_proc := t2.Total() - t1.Total() + overall_percent := ((delta_proc / delta) * 100) * float64(numcpu) + return overall_percent +} + +// MemoryPercent returns how many percent of the total RAM this process uses +func (p *Process) MemoryPercent() (float32, error) { + return p.MemoryPercentWithContext(context.Background()) +} + +func (p *Process) MemoryPercentWithContext(ctx context.Context) (float32, error) { + machineMemory, err := mem.VirtualMemoryWithContext(ctx) + if err != nil { + return 0, err + } + total := machineMemory.Total + + processMemory, err := p.MemoryInfoWithContext(ctx) + if err != nil { + return 0, err + } + used := processMemory.RSS + + return (100 * float32(used) / float32(total)), nil +} + +// CPUPercent returns how many percent of the CPU time this process uses +func (p *Process) CPUPercent() (float64, error) { + return p.CPUPercentWithContext(context.Background()) +} + +func (p *Process) CPUPercentWithContext(ctx context.Context) (float64, error) { + crt_time, err := p.createTimeWithContext(ctx) + if err != nil { + return 0, err + } + + cput, err := p.TimesWithContext(ctx) + if err != nil { + return 0, err + } + + created := time.Unix(0, crt_time*int64(time.Millisecond)) + totalTime := time.Since(created).Seconds() + if totalTime <= 0 { + return 0, nil + } + + return 100 * cput.Total() / totalTime, nil +} + +// Groups returns all group IDs(include supplementary groups) of the process 
as a slice of the int +func (p *Process) Groups() ([]uint32, error) { + return p.GroupsWithContext(context.Background()) +} + +// Ppid returns Parent Process ID of the process. +func (p *Process) Ppid() (int32, error) { + return p.PpidWithContext(context.Background()) +} + +// Name returns name of the process. +func (p *Process) Name() (string, error) { + return p.NameWithContext(context.Background()) +} + +// Exe returns executable path of the process. +func (p *Process) Exe() (string, error) { + return p.ExeWithContext(context.Background()) +} + +// Cmdline returns the command line arguments of the process as a string with +// each argument separated by 0x20 ascii character. +func (p *Process) Cmdline() (string, error) { + return p.CmdlineWithContext(context.Background()) +} + +// CmdlineSlice returns the command line arguments of the process as a slice with each +// element being an argument. +func (p *Process) CmdlineSlice() ([]string, error) { + return p.CmdlineSliceWithContext(context.Background()) +} + +// Cwd returns current working directory of the process. +func (p *Process) Cwd() (string, error) { + return p.CwdWithContext(context.Background()) +} + +// Parent returns parent Process of the process. +func (p *Process) Parent() (*Process, error) { + return p.ParentWithContext(context.Background()) +} + +// ParentWithContext returns parent Process of the process. +func (p *Process) ParentWithContext(ctx context.Context) (*Process, error) { + ppid, err := p.PpidWithContext(ctx) + if err != nil { + return nil, err + } + return NewProcessWithContext(ctx, ppid) +} + +// Status returns the process status. +// Return value could be one of these. +// R: Running S: Sleep T: Stop I: Idle +// Z: Zombie W: Wait L: Lock +// The character is same within all supported platforms. +func (p *Process) Status() ([]string, error) { + return p.StatusWithContext(context.Background()) +} + +// Foreground returns true if the process is in foreground, false otherwise. +func (p *Process) Foreground() (bool, error) { + return p.ForegroundWithContext(context.Background()) +} + +// Uids returns user ids of the process as a slice of the int +func (p *Process) Uids() ([]uint32, error) { + return p.UidsWithContext(context.Background()) +} + +// Gids returns group ids of the process as a slice of the int +func (p *Process) Gids() ([]uint32, error) { + return p.GidsWithContext(context.Background()) +} + +// Terminal returns a terminal which is associated with the process. +func (p *Process) Terminal() (string, error) { + return p.TerminalWithContext(context.Background()) +} + +// Nice returns a nice value (priority). +func (p *Process) Nice() (int32, error) { + return p.NiceWithContext(context.Background()) +} + +// IOnice returns process I/O nice value (priority). +func (p *Process) IOnice() (int32, error) { + return p.IOniceWithContext(context.Background()) +} + +// Rlimit returns Resource Limits. +func (p *Process) Rlimit() ([]RlimitStat, error) { + return p.RlimitWithContext(context.Background()) +} + +// RlimitUsage returns Resource Limits. +// If gatherUsed is true, the currently used value will be gathered and added +// to the resulting RlimitStat. +func (p *Process) RlimitUsage(gatherUsed bool) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(context.Background(), gatherUsed) +} + +// IOCounters returns IO Counters. 
+func (p *Process) IOCounters() (*IOCountersStat, error) { + return p.IOCountersWithContext(context.Background()) +} + +// NumCtxSwitches returns the number of the context switches of the process. +func (p *Process) NumCtxSwitches() (*NumCtxSwitchesStat, error) { + return p.NumCtxSwitchesWithContext(context.Background()) +} + +// NumFDs returns the number of File Descriptors used by the process. +func (p *Process) NumFDs() (int32, error) { + return p.NumFDsWithContext(context.Background()) +} + +// NumThreads returns the number of threads used by the process. +func (p *Process) NumThreads() (int32, error) { + return p.NumThreadsWithContext(context.Background()) +} + +func (p *Process) Threads() (map[int32]*cpu.TimesStat, error) { + return p.ThreadsWithContext(context.Background()) +} + +// Times returns CPU times of the process. +func (p *Process) Times() (*cpu.TimesStat, error) { + return p.TimesWithContext(context.Background()) +} + +// CPUAffinity returns CPU affinity of the process. +func (p *Process) CPUAffinity() ([]int32, error) { + return p.CPUAffinityWithContext(context.Background()) +} + +// MemoryInfo returns generic process memory information, +// such as RSS and VMS. +func (p *Process) MemoryInfo() (*MemoryInfoStat, error) { + return p.MemoryInfoWithContext(context.Background()) +} + +// MemoryInfoEx returns platform-specific process memory information. +func (p *Process) MemoryInfoEx() (*MemoryInfoExStat, error) { + return p.MemoryInfoExWithContext(context.Background()) +} + +// PageFaults returns the process's page fault counters. +func (p *Process) PageFaults() (*PageFaultsStat, error) { + return p.PageFaultsWithContext(context.Background()) +} + +// Children returns the children of the process represented as a slice +// of pointers to Process type. +func (p *Process) Children() ([]*Process, error) { + return p.ChildrenWithContext(context.Background()) +} + +// OpenFiles returns a slice of OpenFilesStat opend by the process. +// OpenFilesStat includes a file path and file descriptor. +func (p *Process) OpenFiles() ([]OpenFilesStat, error) { + return p.OpenFilesWithContext(context.Background()) +} + +// Connections returns a slice of net.ConnectionStat used by the process. +// This returns all kind of the connection. This means TCP, UDP or UNIX. +func (p *Process) Connections() ([]net.ConnectionStat, error) { + return p.ConnectionsWithContext(context.Background()) +} + +// ConnectionsMax returns a slice of net.ConnectionStat used by the process at most `max`. +func (p *Process) ConnectionsMax(max int) ([]net.ConnectionStat, error) { + return p.ConnectionsMaxWithContext(context.Background(), max) +} + +// MemoryMaps get memory maps from /proc/(pid)/smaps +func (p *Process) MemoryMaps(grouped bool) (*[]MemoryMapsStat, error) { + return p.MemoryMapsWithContext(context.Background(), grouped) +} + +// Tgid returns thread group id of the process. +func (p *Process) Tgid() (int32, error) { + return p.TgidWithContext(context.Background()) +} + +// SendSignal sends a unix.Signal to the process. +func (p *Process) SendSignal(sig Signal) error { + return p.SendSignalWithContext(context.Background(), sig) +} + +// Suspend sends SIGSTOP to the process. +func (p *Process) Suspend() error { + return p.SuspendWithContext(context.Background()) +} + +// Resume sends SIGCONT to the process. +func (p *Process) Resume() error { + return p.ResumeWithContext(context.Background()) +} + +// Terminate sends SIGTERM to the process. 
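+// A typical (illustrative) shutdown sequence from a caller combines this with
+// IsRunning and Kill, both defined in this file; the grace period is an
+// arbitrary choice for this sketch:
+//
+//	if err := p.Terminate(); err != nil { // ask for a graceful exit (SIGTERM)
+//		log.Printf("terminate failed: %v", err)
+//	}
+//	time.Sleep(2 * time.Second)
+//	if running, _ := p.IsRunning(); running {
+//		_ = p.Kill() // escalate to SIGKILL
+//	}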
+func (p *Process) Terminate() error { + return p.TerminateWithContext(context.Background()) +} + +// Kill sends SIGKILL to the process. +func (p *Process) Kill() error { + return p.KillWithContext(context.Background()) +} + +// Username returns a username of the process. +func (p *Process) Username() (string, error) { + return p.UsernameWithContext(context.Background()) +} + +// Environ returns the environment variables of the process. +func (p *Process) Environ() ([]string, error) { + return p.EnvironWithContext(context.Background()) +} + +// convertStatusChar as reported by the ps command across different platforms. +func convertStatusChar(letter string) string { + // Sources + // Darwin: http://www.mywebuniversity.com/Man_Pages/Darwin/man_ps.html + // FreeBSD: https://www.freebsd.org/cgi/man.cgi?ps + // Linux https://man7.org/linux/man-pages/man1/ps.1.html + // OpenBSD: https://man.openbsd.org/ps.1#state + // Solaris: https://github.com/collectd/collectd/blob/1da3305c10c8ff9a63081284cf3d4bb0f6daffd8/src/processes.c#L2115 + switch letter { + case "A": + return Daemon + case "D", "U": + return Blocked + case "E": + return Detached + case "I": + return Idle + case "L": + return Lock + case "O": + return Orphan + case "R": + return Running + case "S": + return Sleep + case "T", "t": + // "t" is used by Linux to signal stopped by the debugger during tracing + return Stop + case "W": + return Wait + case "Y": + return System + case "Z": + return Zombie + default: + return UnknownState + } +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go new file mode 100644 index 0000000000000..dcc056101a0a4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_bsd.go @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin || freebsd || openbsd + +package process + +import ( + "bytes" + "context" + "encoding/binary" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" +) + +type MemoryInfoExStat struct{} + +type MemoryMapsStat struct{} + +func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) 
(*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} + +func parseKinfoProc(buf []byte) (KinfoProc, error) { + var k KinfoProc + br := bytes.NewReader(buf) + err := common.Read(br, binary.LittleEndian, &k) + return k, err +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go new file mode 100644 index 0000000000000..5231007c3c377 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin.go @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin + +package process + +import ( + "context" + "fmt" + "path/filepath" + "strconv" + "strings" + + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +// copied from sys/sysctl.h +const ( + CTLKern = 1 // "high kernel": proc, limits + KernProc = 14 // struct: process entries + KernProcPID = 1 // by process id + KernProcProc = 8 // only return procs + KernProcAll = 0 // everything + KernProcPathname = 12 // path to executable +) + +var clockTicks = 100 // default value + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + clockTicks = int(clkTck) + } +} + +type _Ctype_struct___0 struct { + Pad uint64 +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + + kprocs, err := unix.SysctlKinfoProcSlice("kern.proc.all") + if err != nil { + return ret, err + } + + for _, proc := range kprocs { + ret = append(ret, int32(proc.Proc.P_pid)) + } + + return ret, nil +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Eproc.Ppid, nil +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + name := common.ByteToString(k.Proc.P_comm[:]) + + if len(name) >= 15 { + cmdName, err := p.cmdNameWithContext(ctx) + if err != nil { + return "", err + } + if len(cmdName) > 0 { + extendedName := filepath.Base(cmdName) + if strings.HasPrefix(extendedName, p.name) { + name = extendedName + } + } + } + + return name, nil +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Proc.P_starttime.Sec*1000 + int64(k.Proc.P_starttime.Usec)/1000, nil +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + r, err := callPsWithContext(ctx, "state", p.Pid, false, false) + if err != nil { + return []string{""}, err + } + status := convertStatusChar(r[0][0][0:1]) + return []string{status}, err +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { + k, 
err := p.getKProc() + if err != nil { + return nil, err + } + + // See: http://unix.superglobalmegacorp.com/Net2/newsrc/sys/ucred.h.html + userEffectiveUID := uint32(k.Eproc.Ucred.Uid) + + return []uint32{userEffectiveUID}, nil +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_rgid), uint32(k.Eproc.Pcred.P_svgid)) + + return gids, nil +} + +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError + // k, err := p.getKProc() + // if err != nil { + // return nil, err + // } + + // groups := make([]int32, k.Eproc.Ucred.Ngroups) + // for i := int16(0); i < k.Eproc.Ucred.Ngroups; i++ { + // groups[i] = int32(k.Eproc.Ucred.Groups[i]) + // } + + // return groups, nil +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError + /* + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Eproc.Tdev) + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil + */ +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Proc.P_nice), nil +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func convertCPUTimes(s string) (ret float64, err error) { + var t int + var _tmp string + if strings.Contains(s, ":") { + _t := strings.Split(s, ":") + switch len(_t) { + case 3: + hour, err := strconv.Atoi(_t[0]) + if err != nil { + return ret, err + } + t += hour * 60 * 60 * clockTicks + + mins, err := strconv.Atoi(_t[1]) + if err != nil { + return ret, err + } + t += mins * 60 * clockTicks + _tmp = _t[2] + case 2: + mins, err := strconv.Atoi(_t[0]) + if err != nil { + return ret, err + } + t += mins * 60 * clockTicks + _tmp = _t[1] + case 1, 0: + _tmp = s + default: + return ret, fmt.Errorf("wrong cpu time string") + } + } else { + _tmp = s + } + + _t := strings.Split(_tmp, ".") + if err != nil { + return ret, err + } + h, err := strconv.Atoi(_t[0]) + t += h * clockTicks + h, err = strconv.Atoi(_t[1]) + t += h + return float64(t) / float64(clockTicks), nil +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + if err != nil { + return nil, err + } + ret := make([]*Process, 0, len(pids)) + for _, pid := range pids { + np, err := NewProcessWithContext(ctx, pid) + if err != nil { + return nil, err + } + ret = append(ret, np) + } + return ret, nil +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPidWithContext(ctx, "all", p.Pid) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, err + } + + for _, pid := range pids { + p, err := NewProcessWithContext(ctx, pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +// Returns a proc as 
defined here: +// http://unix.superglobalmegacorp.com/Net2/newsrc/sys/kinfo_proc.h.html +func (p *Process) getKProc() (*unix.KinfoProc, error) { + return unix.SysctlKinfoProc("kern.proc.pid", int(p.Pid)) +} + +// call ps command. +// Return value deletes Header line(you must not input wrong arg). +// And splited by Space. Caller have responsibility to manage. +// If passed arg pid is 0, get information from all process. +func callPsWithContext(ctx context.Context, arg string, pid int32, threadOption bool, nameOption bool) ([][]string, error) { + var cmd []string + if pid == 0 { // will get from all processes. + cmd = []string{"-ax", "-o", arg} + } else if threadOption { + cmd = []string{"-x", "-o", arg, "-M", "-p", strconv.Itoa(int(pid))} + } else { + cmd = []string{"-x", "-o", arg, "-p", strconv.Itoa(int(pid))} + } + if nameOption { + cmd = append(cmd, "-c") + } + out, err := invoke.CommandWithContext(ctx, "ps", cmd...) + if err != nil { + return [][]string{}, err + } + lines := strings.Split(string(out), "\n") + + var ret [][]string + for _, l := range lines[1:] { + var lr []string + if nameOption { + lr = append(lr, l) + } else { + for _, r := range strings.Split(l, " ") { + if r == "" { + continue + } + lr = append(lr, strings.TrimSpace(r)) + } + } + if len(lr) != 0 { + ret = append(ret, lr) + } + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go new file mode 100644 index 0000000000000..a13522473a1c3 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_amd64.go @@ -0,0 +1,237 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_darwin.go + +package process + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type UGid_t uint32 + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Eproc struct { + Paddr *uint64 + Sess *Session + Pcred Upcred + Ucred Uucred + Pad_cgo_0 [4]byte + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Pad_cgo_1 [2]byte + Tdev int32 + Tpgid int32 + Pad_cgo_2 [4]byte + Tsess *Session + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Pad_cgo_3 [2]byte + Flag int32 + Login [12]int8 + Spare [4]int32 + Pad_cgo_4 [4]byte +} + +type Proc struct{} + +type Session struct{} + +type ucred struct { + Link _Ctype_struct___0 + Ref uint64 + Posix Posix_cred + Label *Label + Audit Au_session +} + +type Uucred struct { + Ref int32 + UID uint32 + Ngroups int16 + Pad_cgo_0 [2]byte + Groups [16]uint32 +} + +type Upcred struct { + Pc_lock [72]int8 + Pc_ucred *ucred + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + Pad_cgo_0 [4]byte +} + +type Vmspace struct { + Dummy int32 + Pad_cgo_0 [4]byte + Dummy2 *int8 + Dummy3 [5]int32 + Pad_cgo_1 [4]byte + Dummy4 [3]*int8 +} + +type Sigacts struct{} + +type ExternProc struct 
{ + P_un [16]byte + P_vmspace uint64 + P_sigacts uint64 + Pad_cgo_0 [3]byte + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + Pad_cgo_1 [4]byte + User_stack uint64 + Exit_thread uint64 + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + Pad_cgo_2 [4]byte + P_wchan uint64 + P_wmesg uint64 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + Pad_cgo_3 [4]byte + P_tracep uint64 + P_siglist int32 + Pad_cgo_4 [4]byte + P_textvp uint64 + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + Pad_cgo_5 [4]byte + P_pgrp uint64 + P_addr uint64 + P_xstat uint16 + P_acflag uint16 + Pad_cgo_6 [4]byte + P_ru uint64 +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type Vnode struct{} + +type Pgrp struct{} + +type UserStruct struct{} + +type Au_session struct { + Aia_p *AuditinfoAddr + Mask AuMask +} + +type Posix_cred struct { + UID uint32 + Ruid uint32 + Svuid uint32 + Ngroups int16 + Pad_cgo_0 [2]byte + Groups [16]uint32 + Rgid uint32 + Svgid uint32 + Gmuid uint32 + Flags int32 +} + +type Label struct{} + +type AuditinfoAddr struct { + Auid uint32 + Mask AuMask + Termid AuTidAddr + Asid int32 + Flags uint64 +} + +type AuMask struct { + Success uint32 + Failure uint32 +} + +type AuTidAddr struct { + Port int32 + Type uint32 + Addr [4]uint32 +} + +type UcredQueue struct { + Next *ucred + Prev **ucred +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go new file mode 100644 index 0000000000000..f1f3df365d919 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_arm64.go @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && arm64 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs process/types_darwin.go + +package process + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int32 + Pad_cgo_0 [4]byte +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type UGid_t uint32 + +type KinfoProc struct { + Proc ExternProc + Eproc Eproc +} + +type Eproc struct { + Paddr *Proc + Sess *Session + Pcred Upcred + Ucred Uucred + Vm Vmspace + Ppid int32 + Pgid int32 + Jobc int16 + Tdev int32 + Tpgid int32 + Tsess *Session + Wmesg [8]int8 + Xsize int32 + Xrssize int16 + Xccount int16 + Xswrss int16 + Flag int32 + Login [12]int8 + Spare [4]int32 + Pad_cgo_0 [4]byte +} + +type Proc struct{} + +type Session struct{} + +type ucred struct{} + +type Uucred struct { + Ref int32 + UID uint32 + Ngroups int16 + Groups [16]uint32 +} + +type Upcred struct { + Pc_lock [72]int8 + Pc_ucred *ucred + P_ruid uint32 + P_svuid uint32 + P_rgid uint32 + P_svgid uint32 + P_refcnt int32 + Pad_cgo_0 [4]byte +} + +type Vmspace struct { + Dummy int32 + Dummy2 *int8 + Dummy3 [5]int32 + Dummy4 [3]*int8 +} + +type Sigacts struct{} + +type ExternProc struct { + P_un [16]byte + P_vmspace uint64 + P_sigacts uint64 + Pad_cgo_0 [3]byte + P_flag int32 + P_stat int8 + P_pid int32 + P_oppid int32 + P_dupfd int32 + Pad_cgo_1 [4]byte + User_stack uint64 + Exit_thread uint64 + P_debugger int32 + Sigwait int32 + P_estcpu uint32 + P_cpticks int32 + P_pctcpu uint32 + Pad_cgo_2 [4]byte + P_wchan uint64 + P_wmesg uint64 + P_swtime uint32 + P_slptime uint32 + P_realtimer Itimerval + P_rtime Timeval + P_uticks uint64 + P_sticks uint64 + P_iticks uint64 + P_traceflag int32 + Pad_cgo_3 [4]byte + P_tracep uint64 + P_siglist int32 + Pad_cgo_4 [4]byte + P_textvp uint64 + P_holdcnt int32 + P_sigmask uint32 + P_sigignore uint32 + P_sigcatch uint32 + P_priority uint8 + P_usrpri uint8 + P_nice int8 + P_comm [17]int8 + Pad_cgo_5 [4]byte + P_pgrp uint64 + P_addr uint64 + P_xstat uint16 + P_acflag uint16 + Pad_cgo_6 [4]byte + P_ru uint64 +} + +type Itimerval struct { + Interval Timeval + Value Timeval +} + +type Vnode struct{} + +type Pgrp struct{} + +type UserStruct struct{} + +type Au_session struct { + Aia_p *AuditinfoAddr + Mask AuMask +} + +type Posix_cred struct{} + +type Label struct{} + +type AuditinfoAddr struct { + Auid uint32 + Mask AuMask + Termid AuTidAddr + Asid int32 + Flags uint64 +} +type AuMask struct { + Success uint32 + Failure uint32 +} +type AuTidAddr struct { + Port int32 + Type uint32 + Addr [4]uint32 +} + +type UcredQueue struct { + Next *ucred + Prev **ucred +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_cgo.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_cgo.go new file mode 100644 index 0000000000000..bbdfc963ebbee --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_cgo.go @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && cgo + +package process + +// #include +// #include +// #include +// #include +// #include +// #include +// #include +import "C" + +import ( + "bytes" + "context" + "fmt" + 
"strings" + "syscall" + "unsafe" + + "github.com/shirou/gopsutil/v4/cpu" +) + +var ( + argMax int + timescaleToNanoSeconds float64 +) + +func init() { + argMax = getArgMax() + timescaleToNanoSeconds = getTimeScaleToNanoSeconds() +} + +func getArgMax() int { + var ( + mib = [...]C.int{C.CTL_KERN, C.KERN_ARGMAX} + argmax C.int + size C.size_t = C.ulong(unsafe.Sizeof(argmax)) + ) + retval := C.sysctl(&mib[0], 2, unsafe.Pointer(&argmax), &size, C.NULL, 0) + if retval == 0 { + return int(argmax) + } + return 0 +} + +func getTimeScaleToNanoSeconds() float64 { + var timeBaseInfo C.struct_mach_timebase_info + + C.mach_timebase_info(&timeBaseInfo) + + return float64(timeBaseInfo.numer) / float64(timeBaseInfo.denom) +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + var c C.char // need a var for unsafe.Sizeof need a var + const bufsize = C.PROC_PIDPATHINFO_MAXSIZE * unsafe.Sizeof(c) + buffer := (*C.char)(C.malloc(C.size_t(bufsize))) + defer C.free(unsafe.Pointer(buffer)) + + ret, err := C.proc_pidpath(C.int(p.Pid), unsafe.Pointer(buffer), C.uint32_t(bufsize)) + if err != nil { + return "", err + } + if ret <= 0 { + return "", fmt.Errorf("unknown error: proc_pidpath returned %d", ret) + } + + return C.GoString(buffer), nil +} + +// CwdWithContext retrieves the Current Working Directory for the given process. +// It uses the proc_pidinfo from libproc and will only work for processes the +// EUID can access. Otherwise "operation not permitted" will be returned as the +// error. +// Note: This might also work for other *BSD OSs. +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + const vpiSize = C.sizeof_struct_proc_vnodepathinfo + vpi := (*C.struct_proc_vnodepathinfo)(C.malloc(vpiSize)) + defer C.free(unsafe.Pointer(vpi)) + ret, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDVNODEPATHINFO, 0, unsafe.Pointer(vpi), vpiSize) + if err != nil { + // fmt.Printf("ret: %d %T\n", ret, err) + if err == syscall.EPERM { + return "", ErrorNotPermitted + } + return "", err + } + if ret <= 0 { + return "", fmt.Errorf("unknown error: proc_pidinfo returned %d", ret) + } + if ret != C.sizeof_struct_proc_vnodepathinfo { + return "", fmt.Errorf("too few bytes; expected %d, got %d", vpiSize, ret) + } + return C.GoString(&vpi.pvi_cdir.vip_path[0]), err +} + +func procArgs(pid int32) ([]byte, int, error) { + var ( + mib = [...]C.int{C.CTL_KERN, C.KERN_PROCARGS2, C.int(pid)} + size C.size_t = C.ulong(argMax) + nargs C.int + result []byte + ) + procargs := (*C.char)(C.malloc(C.ulong(argMax))) + defer C.free(unsafe.Pointer(procargs)) + retval, err := C.sysctl(&mib[0], 3, unsafe.Pointer(procargs), &size, C.NULL, 0) + if retval == 0 { + C.memcpy(unsafe.Pointer(&nargs), unsafe.Pointer(procargs), C.sizeof_int) + result = C.GoBytes(unsafe.Pointer(procargs), C.int(size)) + // fmt.Printf("size: %d %d\n%s\n", size, nargs, hex.Dump(result)) + return result, int(nargs), nil + } + return nil, 0, err +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + return p.cmdlineSliceWithContext(ctx, true) +} + +func (p *Process) cmdlineSliceWithContext(ctx context.Context, fallback bool) ([]string, error) { + pargs, nargs, err := procArgs(p.Pid) + if err != nil { + return nil, err + } + // The first bytes hold the nargs int, skip it. + args := bytes.Split((pargs)[C.sizeof_int:], []byte{0}) + var argStr string + // The first element is the actual binary/command path. 
+ // command := args[0] + var argSlice []string + // var envSlice []string + // All other, non-zero elements are arguments. The first "nargs" elements + // are the arguments. Everything else in the slice is then the environment + // of the process. + for _, arg := range args[1:] { + argStr = string(arg[:]) + if len(argStr) > 0 { + if nargs > 0 { + argSlice = append(argSlice, argStr) + nargs-- + continue + } + break + // envSlice = append(envSlice, argStr) + } + } + return argSlice, err +} + +// cmdNameWithContext returns the command name (including spaces) without any arguments +func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) { + r, err := p.cmdlineSliceWithContext(ctx, false) + if err != nil { + return "", err + } + + if len(r) == 0 { + return "", nil + } + + return r[0], err +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + r, err := p.CmdlineSliceWithContext(ctx) + if err != nil { + return "", err + } + return strings.Join(r, " "), err +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + const tiSize = C.sizeof_struct_proc_taskinfo + ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) + defer C.free(unsafe.Pointer(ti)) + + _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) + if err != nil { + return 0, err + } + + return int32(ti.pti_threadnum), nil +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + const tiSize = C.sizeof_struct_proc_taskinfo + ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) + defer C.free(unsafe.Pointer(ti)) + + _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) + if err != nil { + return nil, err + } + + ret := &cpu.TimesStat{ + CPU: "cpu", + User: float64(ti.pti_total_user) * timescaleToNanoSeconds / 1e9, + System: float64(ti.pti_total_system) * timescaleToNanoSeconds / 1e9, + } + return ret, nil +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + const tiSize = C.sizeof_struct_proc_taskinfo + ti := (*C.struct_proc_taskinfo)(C.malloc(tiSize)) + defer C.free(unsafe.Pointer(ti)) + + _, err := C.proc_pidinfo(C.int(p.Pid), C.PROC_PIDTASKINFO, 0, unsafe.Pointer(ti), tiSize) + if err != nil { + return nil, err + } + + ret := &MemoryInfoStat{ + RSS: uint64(ti.pti_resident_size), + VMS: uint64(ti.pti_virtual_size), + Swap: uint64(ti.pti_pageins), + } + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go new file mode 100644 index 0000000000000..090e21e0c76cb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_darwin_nocgo.go @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build darwin && !cgo + +package process + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" +) + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + out, err := invoke.CommandWithContext(ctx, "lsof", "-p", strconv.Itoa(int(p.Pid)), "-Fpfn") + if err != nil { + return "", fmt.Errorf("bad call to lsof: %s", err) + } + txtFound := 0 + lines := strings.Split(string(out), "\n") + for i := 1; i < len(lines); i++ { + if lines[i] == "ftxt" { + txtFound++ + if txtFound == 2 { 
+				return lines[i-1][1:], nil
+			}
+		}
+	}
+	return "", fmt.Errorf("missing txt data returned by lsof")
+}
+
+func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) {
+	r, err := callPsWithContext(ctx, "command", p.Pid, false, false)
+	if err != nil {
+		return "", err
+	}
+	return strings.Join(r[0], " "), err
+}
+
+func (p *Process) cmdNameWithContext(ctx context.Context) (string, error) {
+	r, err := callPsWithContext(ctx, "command", p.Pid, false, true)
+	if err != nil {
+		return "", err
+	}
+	if len(r) > 0 && len(r[0]) > 0 {
+		return r[0][0], err
+	}
+
+	return "", err
+}
+
+// CmdlineSliceWithContext returns the command line arguments of the process as a slice with each
+// element being an argument. Because of current deficiencies in the way that the command
+// line arguments are found, single arguments that have spaces in them will actually be
+// reported as two separate items. Doing better would require CGO in order to use the
+// native darwin functions.
+func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) {
+	r, err := callPsWithContext(ctx, "command", p.Pid, false, false)
+	if err != nil {
+		return nil, err
+	}
+	return r[0], err
+}
+
+func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) {
+	r, err := callPsWithContext(ctx, "utime,stime", p.Pid, true, false)
+	if err != nil {
+		return 0, err
+	}
+	return int32(len(r)), nil
+}
+
+func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) {
+	r, err := callPsWithContext(ctx, "utime,stime", p.Pid, false, false)
+	if err != nil {
+		return nil, err
+	}
+
+	utime, err := convertCPUTimes(r[0][0])
+	if err != nil {
+		return nil, err
+	}
+	stime, err := convertCPUTimes(r[0][1])
+	if err != nil {
+		return nil, err
+	}
+
+	ret := &cpu.TimesStat{
+		CPU:    "cpu",
+		User:   utime,
+		System: stime,
+	}
+	return ret, nil
+}
+
+func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) {
+	r, err := callPsWithContext(ctx, "rss,vsize,pagein", p.Pid, false, false)
+	if err != nil {
+		return nil, err
+	}
+	rss, err := strconv.Atoi(r[0][0])
+	if err != nil {
+		return nil, err
+	}
+	vms, err := strconv.Atoi(r[0][1])
+	if err != nil {
+		return nil, err
+	}
+	pagein, err := strconv.Atoi(r[0][2])
+	if err != nil {
+		return nil, err
+	}
+
+	ret := &MemoryInfoStat{
+		RSS:  uint64(rss) * 1024,
+		VMS:  uint64(vms) * 1024,
+		Swap: uint64(pagein),
+	}
+
+	return ret, nil
+}
diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go
new file mode 100644
index 0000000000000..23793e92c506b
--- /dev/null
+++ b/vendor/github.com/shirou/gopsutil/v4/process/process_fallback.go
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: BSD-3-Clause
+//go:build !darwin && !linux && !freebsd && !openbsd && !windows && !solaris && !plan9
+
+package process
+
+import (
+	"context"
+	"syscall"
+
+	"github.com/shirou/gopsutil/v4/cpu"
+	"github.com/shirou/gopsutil/v4/internal/common"
+	"github.com/shirou/gopsutil/v4/net"
+)
+
+type Signal = syscall.Signal
+
+type MemoryMapsStat struct {
+	Path         string `json:"path"`
+	Rss          uint64 `json:"rss"`
+	Size         uint64 `json:"size"`
+	Pss          uint64 `json:"pss"`
+	SharedClean  uint64 `json:"sharedClean"`
+	SharedDirty  uint64 `json:"sharedDirty"`
+	PrivateClean uint64 `json:"privateClean"`
+	PrivateDirty uint64 `json:"privateDirty"`
+	Referenced   uint64 `json:"referenced"`
+	Anonymous    uint64 `json:"anonymous"`
+	Swap         uint64 `json:"swap"`
+}
+
+type 
MemoryInfoExStat struct{} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + return []string{""}, common.ErrNotImplementedError +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) 
{ + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig Signal) error { + return common.ErrNotImplementedError +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) TerminateWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) KillWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go new file mode 100644 index 0000000000000..3d21183d6561e --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd.go @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build freebsd + +package process + +import ( + "bytes" + "context" + "path/filepath" + "strconv" + "strings" + + cpu "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + net "github.com/shirou/gopsutil/v4/net" + "golang.org/x/sys/unix" +) + +func pidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + procs, err := ProcessesWithContext(ctx) + if err != nil { + return ret, nil + } + + for _, p := range procs { + ret = append(ret, p.Pid) + } + + return ret, nil +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Ppid, nil +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + name := common.IntToString(k.Comm[:]) + + if len(name) >= 15 { + cmdlineSlice, err := p.CmdlineSliceWithContext(ctx) + if err != nil { + return "", err + } + if len(cmdlineSlice) > 0 { + extendedName := filepath.Base(cmdlineSlice[0]) + if strings.HasPrefix(extendedName, p.name) { + name = extendedName + } + } + } + + return name, nil +} + +func (p *Process) 
CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + mib := []int32{CTLKern, KernProc, KernProcPathname, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + + return strings.Trim(string(buf), "\x00"), nil +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + mib := []int32{CTLKern, KernProc, KernProcArgs, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return "", err + } + ret := strings.FieldsFunc(string(buf), func(r rune) bool { + if r == '\u0000' { + return true + } + return false + }) + + return strings.Join(ret, " "), nil +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + mib := []int32{CTLKern, KernProc, KernProcArgs, p.Pid} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return nil, err + } + if len(buf) == 0 { + return nil, nil + } + if buf[len(buf)-1] == 0 { + buf = buf[:len(buf)-1] + } + parts := bytes.Split(buf, []byte{0}) + var strParts []string + for _, p := range parts { + strParts = append(strParts, string(p)) + } + + return strParts, nil +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int64(k.Start.Sec)*1000 + int64(k.Start.Usec)/1000, nil +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + k, err := p.getKProc() + if err != nil { + return []string{""}, err + } + var s string + switch k.Stat { + case SIDL: + s = Idle + case SRUN: + s = Running + case SSLEEP: + s = Sleep + case SSTOP: + s = Stop + case SZOMB: + s = Zombie + case SWAIT: + s = Wait + case SLOCK: + s = Lock + } + + return []string{s}, nil +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + uids := make([]uint32, 0, 3) + + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) + + return uids, nil +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) + + return gids, nil +} + +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + groups := make([]uint32, k.Ngroups) + for i := int16(0); i < k.Ngroups; i++ { + groups[i] = uint32(k.Groups[i]) + } + + return groups, nil +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Tdev) + + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Nice), nil +} + +func (p *Process) 
IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &IOCountersStat{ + ReadCount: uint64(k.Rusage.Inblock), + WriteCount: uint64(k.Rusage.Oublock), + }, nil +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Numthreads, nil +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &cpu.TimesStat{ + CPU: "cpu", + User: float64(k.Rusage.Utime.Sec) + float64(k.Rusage.Utime.Usec)/1000000, + System: float64(k.Rusage.Stime.Sec) + float64(k.Rusage.Stime.Usec)/1000000, + }, nil +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + v, err := unix.Sysctl("vm.stats.vm.v_page_size") + if err != nil { + return nil, err + } + pageSize := common.LittleEndian.Uint16([]byte(v)) + + return &MemoryInfoStat{ + RSS: uint64(k.Rssize) * uint64(pageSize), + VMS: uint64(k.Size), + }, nil +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + if err != nil { + return nil, err + } + ret := make([]*Process, 0, len(pids)) + for _, pid := range pids { + np, err := NewProcessWithContext(ctx, pid) + if err != nil { + return nil, err + } + ret = append(ret, np) + } + return ret, nil +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPidWithContext(ctx, "all", p.Pid) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + results := []*Process{} + + mib := []int32{CTLKern, KernProc, KernProcProc, 0} + buf, length, err := common.CallSyscall(mib) + if err != nil { + return results, err + } + + // get kinfo_proc size + count := int(length / uint64(sizeOfKinfoProc)) + + // parse buf to procs + for i := 0; i < count; i++ { + b := buf[i*sizeOfKinfoProc : (i+1)*sizeOfKinfoProc] + k, err := parseKinfoProc(b) + if err != nil { + continue + } + p, err := NewProcessWithContext(ctx, int32(k.Pid)) + if err != nil { + continue + } + + results = append(results, p) + } + + return results, nil +} + +func (p *Process) getKProc() (*KinfoProc, error) { + mib := []int32{CTLKern, KernProc, KernProcPID, p.Pid} + + buf, length, err := common.CallSyscall(mib) + if err != nil { + return nil, err + } + if length != sizeOfKinfoProc { + return nil, err + } + + k, err := parseKinfoProc(buf) + if err != nil { + return nil, err + } + return &k, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go new file mode 100644 index 0000000000000..279ba9fbb40f8 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_386.go @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 +) + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + 
sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x300 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int32 + Nsec int32 +} + +type Timeval struct { + Sec int32 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int32 /* pargs */ + Paddr int32 /* proc */ + Addr int32 /* user */ + Tracep int32 /* vnode */ + Textvp int32 /* vnode */ + Fd int32 /* filedesc */ + Vmspace int32 /* vmspace */ + Wchan int32 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint32 + Rssize int32 + Swrss int32 + Tsize int32 + Dsize int32 + Ssize int32 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int32 + Kiflag int32 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu uint8 + Lastcpu uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Sparestrings [50]int8 + Spareints [7]int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int32 /* pcb */ + Kstack int32 + Udata int32 + Tdaddr int32 /* thread */ + Spareptrs [6]int32 + Sparelongs [12]int32 + Sflag int32 + Tdflags int32 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev uint32 + Vn_mode uint16 + Status uint16 + X_kve_ispare [12]int32 + Path [1024]int8 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go new file mode 100644 index 0000000000000..f3b70ec1bec86 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_amd64.go @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x440 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + 
_C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int64 /* pargs */ + Paddr int64 /* proc */ + Addr int64 /* user */ + Tracep int64 /* vnode */ + Textvp int64 /* vnode */ + Fd int64 /* filedesc */ + Vmspace int64 /* vmspace */ + Wchan int64 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint64 + Rssize int64 + Swrss int64 + Tsize int64 + Dsize int64 + Ssize int64 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int64 + Kiflag int64 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu uint8 + Lastcpu uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Sparestrings [50]int8 + Spareints [7]int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int64 /* pcb */ + Kstack int64 + Udata int64 + Tdaddr int64 /* thread */ + Spareptrs [6]int64 + Sparelongs [12]int64 + Sflag int64 + Tdflags int64 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev uint32 + Vn_mode uint16 + Status uint16 + X_kve_ispare [12]int32 + Path [1024]int8 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go new file mode 100644 index 0000000000000..75ed30630502b --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm.go @@ -0,0 +1,193 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 +) + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x440 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 
+ Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur int32 + Max int32 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args int32 /* pargs */ + Paddr int32 /* proc */ + Addr int32 /* user */ + Tracep int32 /* vnode */ + Textvp int32 /* vnode */ + Fd int32 /* filedesc */ + Vmspace int32 /* vmspace */ + Wchan int32 + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint32 + Rssize int32 + Swrss int32 + Tsize int32 + Dsize int32 + Ssize int32 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int32 + Kiflag int32 + Traceflag int32 + Stat int8 + Nice int8 + Lock int8 + Rqindex int8 + Oncpu uint8 + Lastcpu uint8 + Tdname [17]int8 + Wmesg [9]int8 + Login [18]int8 + Lockname [9]int8 + Comm [20]int8 + Emul [17]int8 + Loginclass [18]int8 + Sparestrings [50]int8 + Spareints [4]int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb int32 /* pcb */ + Kstack int32 + Udata int32 + Tdaddr int32 /* thread */ + Spareptrs [6]int64 + Sparelongs [12]int64 + Sflag int64 + Tdflags int64 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev uint32 + Vn_mode uint16 + Status uint16 + X_kve_ispare [12]int32 + Path [1024]int8 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go new file mode 100644 index 0000000000000..dbb3baa3e756c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_freebsd_arm64.go @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build freebsd && arm64 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs process/types_freebsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 14 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 7 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x488 + sizeOfKinfoProc = 0x440 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SWAIT = 6 + SLOCK = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur int64 + Max int64 +} + +type KinfoProc struct { + Structsize int32 + Layout int32 + Args *int64 /* pargs */ + Paddr *int64 /* proc */ + Addr *int64 /* user */ + Tracep *int64 /* vnode */ + Textvp *int64 /* vnode */ + Fd *int64 /* filedesc */ + Vmspace *int64 /* vmspace */ + Wchan *byte + Pid int32 + Ppid int32 + Pgid int32 + Tpgid int32 + Sid int32 + Tsid int32 + Jobc int16 + Spare_short1 int16 + Tdev_freebsd11 uint32 + Siglist [16]byte /* sigset */ + Sigmask [16]byte /* sigset */ + Sigignore [16]byte /* sigset */ + Sigcatch [16]byte /* sigset */ + Uid uint32 + Ruid uint32 + Svuid uint32 + Rgid uint32 + Svgid uint32 + Ngroups int16 + Spare_short2 int16 + Groups [16]uint32 + Size uint64 + Rssize int64 + Swrss int64 + Tsize int64 + Dsize int64 + Ssize int64 + Xstat uint16 + Acflag uint16 + Pctcpu uint32 + Estcpu uint32 + Slptime uint32 + Swtime uint32 + Cow uint32 + Runtime uint64 + Start Timeval + Childtime Timeval + Flag int64 + Kiflag int64 + Traceflag int32 + Stat uint8 + Nice int8 + Lock uint8 + Rqindex uint8 + Oncpu_old uint8 + Lastcpu_old uint8 + Tdname [17]uint8 + Wmesg [9]uint8 + Login [18]uint8 + Lockname [9]uint8 + Comm [20]int8 + Emul [17]uint8 + Loginclass [18]uint8 + Moretdname [4]uint8 + Sparestrings [46]uint8 + Spareints [2]int32 + Tdev uint64 + Oncpu int32 + Lastcpu int32 + Tracer int32 + Flag2 int32 + Fibnum int32 + Cr_flags uint32 + Jid int32 + Numthreads int32 + Tid int32 + Pri Priority + Rusage Rusage + Rusage_ch Rusage + Pcb *int64 /* pcb */ + Kstack *byte + Udata *byte + Tdaddr *int64 /* thread */ + Spareptrs [6]*byte + Sparelongs [12]int64 + Sflag int64 + Tdflags int64 +} + +type Priority struct { + Class uint8 + Level uint8 + Native uint8 + User uint8 +} + +type KinfoVmentry struct { + Structsize int32 + Type int32 + Start uint64 + End uint64 + Offset uint64 + Vn_fileid uint64 + Vn_fsid_freebsd11 uint32 + Flags int32 + Resident int32 + Private_resident int32 + Protection int32 + Ref_count int32 + Shadow_count int32 + Vn_type int32 + Vn_size uint64 + Vn_rdev_freebsd11 uint32 + Vn_mode uint16 + Status uint16 + Vn_fsid uint64 + Vn_rdev uint64 + X_kve_ispare [8]int32 + Path [1024]uint8 +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go new file mode 100644 index 0000000000000..2151ed5c84629 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_linux.go @@ -0,0 +1,1187 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux + +package process + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "math" + "os" 
+ "path/filepath" + "strconv" + "strings" + + "github.com/tklauser/go-sysconf" + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +var pageSize = uint64(os.Getpagesize()) + +const prioProcess = 0 // linux/resource.h + +var clockTicks = 100 // default value + +func init() { + clkTck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + // ignore errors + if err == nil { + clockTicks = int(clkTck) + } +} + +// MemoryInfoExStat is different between OSes +type MemoryInfoExStat struct { + RSS uint64 `json:"rss"` // bytes + VMS uint64 `json:"vms"` // bytes + Shared uint64 `json:"shared"` // bytes + Text uint64 `json:"text"` // bytes + Lib uint64 `json:"lib"` // bytes + Data uint64 `json:"data"` // bytes + Dirty uint64 `json:"dirty"` // bytes +} + +func (m MemoryInfoExStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +type MemoryMapsStat struct { + Path string `json:"path"` + Rss uint64 `json:"rss"` + Size uint64 `json:"size"` + Pss uint64 `json:"pss"` + SharedClean uint64 `json:"sharedClean"` + SharedDirty uint64 `json:"sharedDirty"` + PrivateClean uint64 `json:"privateClean"` + PrivateDirty uint64 `json:"privateDirty"` + Referenced uint64 `json:"referenced"` + Anonymous uint64 `json:"anonymous"` + Swap uint64 `json:"swap"` +} + +// String returns JSON value of the process. +func (m MemoryMapsStat) String() string { + s, _ := json.Marshal(m) + return string(s) +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + _, ppid, _, _, _, _, _, err := p.fillFromStatWithContext(ctx) + if err != nil { + return -1, err + } + return ppid, nil +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + if p.name == "" { + if err := p.fillNameWithContext(ctx); err != nil { + return "", err + } + } + return p.name, nil +} + +func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { + if p.tgid == 0 { + if err := p.fillFromStatusWithContext(ctx); err != nil { + return 0, err + } + } + return p.tgid, nil +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + return p.fillFromExeWithContext(ctx) +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + return p.fillFromCmdlineWithContext(ctx) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + return p.fillSliceFromCmdlineWithContext(ctx) +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + _, _, _, createTime, _, _, _, err := p.fillFromStatWithContext(ctx) + if err != nil { + return 0, err + } + return createTime, nil +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return p.fillFromCwdWithContext(ctx) +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + err := p.fillFromStatusWithContext(ctx) + if err != nil { + return []string{""}, err + } + return []string{p.status}, nil +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "stat") + contents, err := os.ReadFile(statPath) + if err != nil { + return false, err + } + fields := strings.Fields(string(contents)) + if len(fields) < 8 { + return false, fmt.Errorf("insufficient data in %s", statPath) + } + pgid := fields[4] + tpgid := 
fields[7] + return pgid == tpgid, nil +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { + err := p.fillFromStatusWithContext(ctx) + if err != nil { + return []uint32{}, err + } + return p.uids, nil +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { + err := p.fillFromStatusWithContext(ctx) + if err != nil { + return []uint32{}, err + } + return p.gids, nil +} + +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { + err := p.fillFromStatusWithContext(ctx) + if err != nil { + return []uint32{}, err + } + return p.groups, nil +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + t, _, _, _, _, _, _, err := p.fillFromStatWithContext(ctx) + if err != nil { + return "", err + } + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + terminal := termmap[t] + return terminal, nil +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + _, _, _, _, _, nice, _, err := p.fillFromStatWithContext(ctx) + if err != nil { + return 0, err + } + return nice, nil +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + return p.RlimitUsageWithContext(ctx, false) +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + rlimits, err := p.fillFromLimitsWithContext(ctx) + if !gatherUsed || err != nil { + return rlimits, err + } + + _, _, _, _, rtprio, nice, _, err := p.fillFromStatWithContext(ctx) + if err != nil { + return nil, err + } + if err := p.fillFromStatusWithContext(ctx); err != nil { + return nil, err + } + + for i := range rlimits { + rs := &rlimits[i] + switch rs.Resource { + case RLIMIT_CPU: + times, err := p.TimesWithContext(ctx) + if err != nil { + return nil, err + } + rs.Used = uint64(times.User + times.System) + case RLIMIT_DATA: + rs.Used = uint64(p.memInfo.Data) + case RLIMIT_STACK: + rs.Used = uint64(p.memInfo.Stack) + case RLIMIT_RSS: + rs.Used = uint64(p.memInfo.RSS) + case RLIMIT_NOFILE: + n, err := p.NumFDsWithContext(ctx) + if err != nil { + return nil, err + } + rs.Used = uint64(n) + case RLIMIT_MEMLOCK: + rs.Used = uint64(p.memInfo.Locked) + case RLIMIT_AS: + rs.Used = uint64(p.memInfo.VMS) + case RLIMIT_LOCKS: + // TODO we can get the used value from /proc/$pid/locks. But linux doesn't enforce it, so not a high priority. + case RLIMIT_SIGPENDING: + rs.Used = p.sigInfo.PendingProcess + case RLIMIT_NICE: + // The rlimit for nice is a little unusual, in that 0 means the niceness cannot be decreased beyond the current value, but it can be increased. 
+ // So effectively: if rs.Soft == 0 { rs.Soft = rs.Used } + rs.Used = uint64(nice) + case RLIMIT_RTPRIO: + rs.Used = uint64(rtprio) + } + } + + return rlimits, err +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return p.fillFromIOWithContext(ctx) +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + err := p.fillFromStatusWithContext(ctx) + if err != nil { + return nil, err + } + return p.numCtxSwitches, nil +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + _, fnames, err := p.fillFromfdListWithContext(ctx) + return int32(len(fnames)), err +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + err := p.fillFromStatusWithContext(ctx) + if err != nil { + return 0, err + } + return p.numThreads, nil +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + ret := make(map[int32]*cpu.TimesStat) + taskPath := common.HostProcWithContext(ctx, strconv.Itoa(int(p.Pid)), "task") + + tids, err := readPidsFromDir(taskPath) + if err != nil { + return nil, err + } + + for _, tid := range tids { + _, _, cpuTimes, _, _, _, _, err := p.fillFromTIDStatWithContext(ctx, tid) + if err != nil { + return nil, err + } + ret[tid] = cpuTimes + } + + return ret, nil +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + _, _, cpuTimes, _, _, _, _, err := p.fillFromStatWithContext(ctx) + if err != nil { + return nil, err + } + return cpuTimes, nil +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + meminfo, _, err := p.fillFromStatmWithContext(ctx) + if err != nil { + return nil, err + } + return meminfo, nil +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + _, memInfoEx, err := p.fillFromStatmWithContext(ctx) + if err != nil { + return nil, err + } + return memInfoEx, nil +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + _, _, _, _, _, _, pageFaults, err := p.fillFromStatWithContext(ctx) + if err != nil { + return nil, err + } + return pageFaults, nil +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + if err != nil { + return nil, err + } + if len(pids) == 0 { + return nil, ErrorNoChildren + } + ret := make([]*Process, 0, len(pids)) + for _, pid := range pids { + np, err := NewProcessWithContext(ctx, pid) + if err != nil { + return nil, err + } + ret = append(ret, np) + } + return ret, nil +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + _, ofs, err := p.fillFromfdWithContext(ctx) + if err != nil { + return nil, err + } + ret := make([]OpenFilesStat, len(ofs)) + for i, o := range ofs { + ret[i] = *o + } + + return ret, nil +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPidWithContext(ctx, "all", p.Pid) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return net.ConnectionsPidMaxWithContext(ctx, "all", p.Pid, max) +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + pid := 
p.Pid + var ret []MemoryMapsStat + smapsPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "smaps") + if grouped { + ret = make([]MemoryMapsStat, 1) + // If smaps_rollup exists (require kernel >= 4.15), then we will use it + // for pre-summed memory information for a process. + smapsRollupPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "smaps_rollup") + if _, err := os.Stat(smapsRollupPath); !os.IsNotExist(err) { + smapsPath = smapsRollupPath + } + } + contents, err := os.ReadFile(smapsPath) + if err != nil { + return nil, err + } + lines := strings.Split(string(contents), "\n") + + // function of parsing a block + getBlock := func(firstLine []string, block []string) (MemoryMapsStat, error) { + m := MemoryMapsStat{} + m.Path = firstLine[len(firstLine)-1] + + for _, line := range block { + if strings.Contains(line, "VmFlags") { + continue + } + field := strings.Split(line, ":") + if len(field) < 2 { + continue + } + v := strings.Trim(field[1], "kB") // remove last "kB" + v = strings.TrimSpace(v) + t, err := strconv.ParseUint(v, 10, 64) + if err != nil { + return m, err + } + + switch field[0] { + case "Size": + m.Size = t + case "Rss": + m.Rss = t + case "Pss": + m.Pss = t + case "Shared_Clean": + m.SharedClean = t + case "Shared_Dirty": + m.SharedDirty = t + case "Private_Clean": + m.PrivateClean = t + case "Private_Dirty": + m.PrivateDirty = t + case "Referenced": + m.Referenced = t + case "Anonymous": + m.Anonymous = t + case "Swap": + m.Swap = t + } + } + return m, nil + } + + var firstLine []string + blocks := make([]string, 0, 16) + + for i, line := range lines { + fields := strings.Fields(line) + if (len(fields) > 0 && !strings.HasSuffix(fields[0], ":")) || i == len(lines)-1 { + // new block section + if len(firstLine) > 0 && len(blocks) > 0 { + g, err := getBlock(firstLine, blocks) + if err != nil { + return &ret, err + } + if grouped { + ret[0].Size += g.Size + ret[0].Rss += g.Rss + ret[0].Pss += g.Pss + ret[0].SharedClean += g.SharedClean + ret[0].SharedDirty += g.SharedDirty + ret[0].PrivateClean += g.PrivateClean + ret[0].PrivateDirty += g.PrivateDirty + ret[0].Referenced += g.Referenced + ret[0].Anonymous += g.Anonymous + ret[0].Swap += g.Swap + } else { + ret = append(ret, g) + } + } + // starts new block + blocks = make([]string, 0, 16) + firstLine = fields + } else { + blocks = append(blocks, line) + } + } + + return &ret, nil +} + +func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { + environPath := common.HostProcWithContext(ctx, strconv.Itoa(int(p.Pid)), "environ") + + environContent, err := os.ReadFile(environPath) + if err != nil { + return nil, err + } + + return strings.Split(string(environContent), "\000"), nil +} + +/** +** Internal functions +**/ + +func limitToUint(val string) (uint64, error) { + if val == "unlimited" { + return math.MaxUint64, nil + } + res, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return 0, err + } + return res, nil +} + +// Get num_fds from /proc/(pid)/limits +func (p *Process) fillFromLimitsWithContext(ctx context.Context) ([]RlimitStat, error) { + pid := p.Pid + limitsFile := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "limits") + d, err := os.Open(limitsFile) + if err != nil { + return nil, err + } + defer d.Close() + + var limitStats []RlimitStat + + limitsScanner := bufio.NewScanner(d) + for limitsScanner.Scan() { + var statItem RlimitStat + + str := strings.Fields(limitsScanner.Text()) + + // Remove the header line + if strings.Contains(str[len(str)-1], 
"Units") { + continue + } + + // Assert that last item is a Hard limit + statItem.Hard, err = limitToUint(str[len(str)-1]) + if err != nil { + // On error remove last item and try once again since it can be unit or header line + str = str[:len(str)-1] + statItem.Hard, err = limitToUint(str[len(str)-1]) + if err != nil { + return nil, err + } + } + // Remove last item from string + str = str[:len(str)-1] + + // Now last item is a Soft limit + statItem.Soft, err = limitToUint(str[len(str)-1]) + if err != nil { + return nil, err + } + // Remove last item from string + str = str[:len(str)-1] + + // The rest is a stats name + resourceName := strings.Join(str, " ") + switch resourceName { + case "Max cpu time": + statItem.Resource = RLIMIT_CPU + case "Max file size": + statItem.Resource = RLIMIT_FSIZE + case "Max data size": + statItem.Resource = RLIMIT_DATA + case "Max stack size": + statItem.Resource = RLIMIT_STACK + case "Max core file size": + statItem.Resource = RLIMIT_CORE + case "Max resident set": + statItem.Resource = RLIMIT_RSS + case "Max processes": + statItem.Resource = RLIMIT_NPROC + case "Max open files": + statItem.Resource = RLIMIT_NOFILE + case "Max locked memory": + statItem.Resource = RLIMIT_MEMLOCK + case "Max address space": + statItem.Resource = RLIMIT_AS + case "Max file locks": + statItem.Resource = RLIMIT_LOCKS + case "Max pending signals": + statItem.Resource = RLIMIT_SIGPENDING + case "Max msgqueue size": + statItem.Resource = RLIMIT_MSGQUEUE + case "Max nice priority": + statItem.Resource = RLIMIT_NICE + case "Max realtime priority": + statItem.Resource = RLIMIT_RTPRIO + case "Max realtime timeout": + statItem.Resource = RLIMIT_RTTIME + default: + continue + } + + limitStats = append(limitStats, statItem) + } + + if err := limitsScanner.Err(); err != nil { + return nil, err + } + + return limitStats, nil +} + +// Get list of /proc/(pid)/fd files +func (p *Process) fillFromfdListWithContext(ctx context.Context) (string, []string, error) { + pid := p.Pid + statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "fd") + d, err := os.Open(statPath) + if err != nil { + return statPath, []string{}, err + } + defer d.Close() + fnames, err := d.Readdirnames(-1) + return statPath, fnames, err +} + +// Get num_fds from /proc/(pid)/fd +func (p *Process) fillFromfdWithContext(ctx context.Context) (int32, []*OpenFilesStat, error) { + statPath, fnames, err := p.fillFromfdListWithContext(ctx) + if err != nil { + return 0, nil, err + } + numFDs := int32(len(fnames)) + + var openfiles []*OpenFilesStat + for _, fd := range fnames { + fpath := filepath.Join(statPath, fd) + filepath, err := os.Readlink(fpath) + if err != nil { + continue + } + t, err := strconv.ParseUint(fd, 10, 64) + if err != nil { + return numFDs, openfiles, err + } + o := &OpenFilesStat{ + Path: filepath, + Fd: t, + } + openfiles = append(openfiles, o) + } + + return numFDs, openfiles, nil +} + +// Get cwd from /proc/(pid)/cwd +func (p *Process) fillFromCwdWithContext(ctx context.Context) (string, error) { + pid := p.Pid + cwdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cwd") + cwd, err := os.Readlink(cwdPath) + if err != nil { + return "", err + } + return string(cwd), nil +} + +// Get exe from /proc/(pid)/exe +func (p *Process) fillFromExeWithContext(ctx context.Context) (string, error) { + pid := p.Pid + exePath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "exe") + exe, err := os.Readlink(exePath) + if err != nil { + return "", err + } + return string(exe), nil +} + 
+// Get cmdline from /proc/(pid)/cmdline +func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) { + pid := p.Pid + cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") + cmdline, err := os.ReadFile(cmdPath) + if err != nil { + return "", err + } + ret := strings.FieldsFunc(string(cmdline), func(r rune) bool { + return r == '\u0000' + }) + + return strings.Join(ret, " "), nil +} + +func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) { + pid := p.Pid + cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") + cmdline, err := os.ReadFile(cmdPath) + if err != nil { + return nil, err + } + if len(cmdline) == 0 { + return nil, nil + } + + cmdline = bytes.TrimRight(cmdline, "\x00") + + parts := bytes.Split(cmdline, []byte{0}) + var strParts []string + for _, p := range parts { + strParts = append(strParts, string(p)) + } + + return strParts, nil +} + +// Get IO status from /proc/(pid)/io +func (p *Process) fillFromIOWithContext(ctx context.Context) (*IOCountersStat, error) { + pid := p.Pid + ioPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "io") + ioline, err := os.ReadFile(ioPath) + if err != nil { + return nil, err + } + lines := strings.Split(string(ioline), "\n") + ret := &IOCountersStat{} + + for _, line := range lines { + field := strings.Fields(line) + if len(field) < 2 { + continue + } + t, err := strconv.ParseUint(field[1], 10, 64) + if err != nil { + return nil, err + } + param := strings.TrimSuffix(field[0], ":") + switch param { + case "syscr": + ret.ReadCount = t + case "syscw": + ret.WriteCount = t + case "read_bytes": + ret.ReadBytes = t + case "write_bytes": + ret.WriteBytes = t + } + } + + return ret, nil +} + +// Get memory info from /proc/(pid)/statm +func (p *Process) fillFromStatmWithContext(ctx context.Context) (*MemoryInfoStat, *MemoryInfoExStat, error) { + pid := p.Pid + memPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "statm") + contents, err := os.ReadFile(memPath) + if err != nil { + return nil, nil, err + } + fields := strings.Split(string(contents), " ") + + vms, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, nil, err + } + rss, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, nil, err + } + memInfo := &MemoryInfoStat{ + RSS: rss * pageSize, + VMS: vms * pageSize, + } + + shared, err := strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, nil, err + } + text, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, nil, err + } + lib, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, nil, err + } + dirty, err := strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, nil, err + } + + memInfoEx := &MemoryInfoExStat{ + RSS: rss * pageSize, + VMS: vms * pageSize, + Shared: shared * pageSize, + Text: text * pageSize, + Lib: lib * pageSize, + Dirty: dirty * pageSize, + } + + return memInfo, memInfoEx, nil +} + +// Get name from /proc/(pid)/comm or /proc/(pid)/status +func (p *Process) fillNameWithContext(ctx context.Context) error { + err := p.fillFromCommWithContext(ctx) + if err == nil && p.name != "" && len(p.name) < 15 { + return nil + } + return p.fillFromStatusWithContext(ctx) +} + +// Get name from /proc/(pid)/comm +func (p *Process) fillFromCommWithContext(ctx context.Context) error { + pid := p.Pid + statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "comm") + contents, err 
:= os.ReadFile(statPath) + if err != nil { + return err + } + + p.name = strings.TrimSuffix(string(contents), "\n") + return nil +} + +// Get various status from /proc/(pid)/status +func (p *Process) fillFromStatus() error { + return p.fillFromStatusWithContext(context.Background()) +} + +func (p *Process) fillFromStatusWithContext(ctx context.Context) error { + pid := p.Pid + statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "status") + contents, err := os.ReadFile(statPath) + if err != nil { + return err + } + lines := strings.Split(string(contents), "\n") + p.numCtxSwitches = &NumCtxSwitchesStat{} + p.memInfo = &MemoryInfoStat{} + p.sigInfo = &SignalInfoStat{} + for _, line := range lines { + tabParts := strings.SplitN(line, "\t", 2) + if len(tabParts) < 2 { + continue + } + value := tabParts[1] + switch strings.TrimRight(tabParts[0], ":") { + case "Name": + p.name = strings.Trim(value, " \t") + if len(p.name) >= 15 { + cmdlineSlice, err := p.CmdlineSliceWithContext(ctx) + if err != nil { + return err + } + if len(cmdlineSlice) > 0 { + extendedName := filepath.Base(cmdlineSlice[0]) + if strings.HasPrefix(extendedName, p.name) { + p.name = extendedName + } + } + } + // Ensure we have a copy and not reference into slice + p.name = string([]byte(p.name)) + case "State": + p.status = convertStatusChar(value[0:1]) + // Ensure we have a copy and not reference into slice + p.status = string([]byte(p.status)) + case "PPid", "Ppid": + pval, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + p.parent = int32(pval) + case "Tgid": + pval, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + p.tgid = int32(pval) + case "Uid": + p.uids = make([]uint32, 0, 4) + for _, i := range strings.Split(value, "\t") { + v, err := strconv.ParseInt(i, 10, 32) + if err != nil { + return err + } + p.uids = append(p.uids, uint32(v)) + } + case "Gid": + p.gids = make([]uint32, 0, 4) + for _, i := range strings.Split(value, "\t") { + v, err := strconv.ParseInt(i, 10, 32) + if err != nil { + return err + } + p.gids = append(p.gids, uint32(v)) + } + case "Groups": + groups := strings.Fields(value) + p.groups = make([]uint32, 0, len(groups)) + for _, i := range groups { + v, err := strconv.ParseUint(i, 10, 32) + if err != nil { + return err + } + p.groups = append(p.groups, uint32(v)) + } + case "Threads": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + p.numThreads = int32(v) + case "voluntary_ctxt_switches": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + p.numCtxSwitches.Voluntary = v + case "nonvoluntary_ctxt_switches": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + p.numCtxSwitches.Involuntary = v + case "VmRSS": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.RSS = v * 1024 + case "VmSize": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.VMS = v * 1024 + case "VmSwap": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Swap = v * 1024 + case "VmHWM": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.HWM = v * 1024 + case "VmData": + value := 
strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Data = v * 1024 + case "VmStk": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Stack = v * 1024 + case "VmLck": + value := strings.Trim(value, " kB") // remove last "kB" + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + p.memInfo.Locked = v * 1024 + case "SigPnd": + if len(value) > 16 { + value = value[len(value)-16:] + } + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.PendingThread = v + case "ShdPnd": + if len(value) > 16 { + value = value[len(value)-16:] + } + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.PendingProcess = v + case "SigBlk": + if len(value) > 16 { + value = value[len(value)-16:] + } + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.Blocked = v + case "SigIgn": + if len(value) > 16 { + value = value[len(value)-16:] + } + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.Ignored = v + case "SigCgt": + if len(value) > 16 { + value = value[len(value)-16:] + } + v, err := strconv.ParseUint(value, 16, 64) + if err != nil { + return err + } + p.sigInfo.Caught = v + } + + } + return nil +} + +func (p *Process) fillFromTIDStat(tid int32) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) { + return p.fillFromTIDStatWithContext(context.Background(), tid) +} + +func (p *Process) fillFromTIDStatWithContext(ctx context.Context, tid int32) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) { + pid := p.Pid + var statPath string + + if tid == -1 { + statPath = common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "stat") + } else { + statPath = common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "task", strconv.Itoa(int(tid)), "stat") + } + + contents, err := os.ReadFile(statPath) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + // Indexing from one, as described in `man proc` about the file /proc/[pid]/stat + fields := splitProcStat(contents) + + terminal, err := strconv.ParseUint(fields[7], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + ppid, err := strconv.ParseInt(fields[4], 10, 32) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + utime, err := strconv.ParseFloat(fields[14], 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + stime, err := strconv.ParseFloat(fields[15], 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + // There is no such thing as iotime in stat file. As an approximation, we + // will use delayacct_blkio_ticks (aggregated block I/O delays, as per Linux + // docs). Note: I am assuming at least Linux 2.6.18 + var iotime float64 + if len(fields) > 42 { + iotime, err = strconv.ParseFloat(fields[42], 64) + if err != nil { + iotime = 0 // Ancient linux version, most likely + } + } else { + iotime = 0 // e.g. 
SmartOS containers + } + + cpuTimes := &cpu.TimesStat{ + CPU: "cpu", + User: utime / float64(clockTicks), + System: stime / float64(clockTicks), + Iowait: iotime / float64(clockTicks), + } + + bootTime, _ := common.BootTimeWithContext(ctx, enableBootTimeCache) + t, err := strconv.ParseUint(fields[22], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + ctime := (t / uint64(clockTicks)) + uint64(bootTime) + createTime := int64(ctime * 1000) + + rtpriority, err := strconv.ParseInt(fields[18], 10, 32) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + if rtpriority < 0 { + rtpriority = rtpriority*-1 - 1 + } else { + rtpriority = 0 + } + + // p.Nice = mustParseInt32(fields[18]) + // use syscall instead of parse Stat file + snice, _ := unix.Getpriority(prioProcess, int(pid)) + nice := int32(snice) // FIXME: is this true? + + minFault, err := strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + cMinFault, err := strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + majFault, err := strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + cMajFault, err := strconv.ParseUint(fields[13], 10, 64) + if err != nil { + return 0, 0, nil, 0, 0, 0, nil, err + } + + faults := &PageFaultsStat{ + MinorFaults: minFault, + MajorFaults: majFault, + ChildMinorFaults: cMinFault, + ChildMajorFaults: cMajFault, + } + + return terminal, int32(ppid), cpuTimes, createTime, uint32(rtpriority), nice, faults, nil +} + +func (p *Process) fillFromStatWithContext(ctx context.Context) (uint64, int32, *cpu.TimesStat, int64, uint32, int32, *PageFaultsStat, error) { + return p.fillFromTIDStatWithContext(ctx, -1) +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + return readPidsFromDir(common.HostProcWithContext(ctx)) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, err + } + + for _, pid := range pids { + p, err := NewProcessWithContext(ctx, pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +func readPidsFromDir(path string) ([]int32, error) { + var ret []int32 + + d, err := os.Open(path) + if err != nil { + return nil, err + } + defer d.Close() + + fnames, err := d.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, fname := range fnames { + pid, err := strconv.ParseInt(fname, 10, 32) + if err != nil { + // if not numeric name, just skip + continue + } + ret = append(ret, int32(pid)) + } + + return ret, nil +} + +func splitProcStat(content []byte) []string { + nameStart := bytes.IndexByte(content, '(') + nameEnd := bytes.LastIndexByte(content, ')') + restFields := strings.Fields(string(content[nameEnd+2:])) // +2 skip ') ' + name := content[nameStart+1 : nameEnd] + pid := strings.TrimSpace(string(content[:nameStart])) + fields := make([]string, 3, len(restFields)+3) + fields[1] = string(pid) + fields[2] = string(name) + fields = append(fields, restFields...) 
+ return fields +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go new file mode 100644 index 0000000000000..7cd8ca7364299 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd.go @@ -0,0 +1,387 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd + +package process + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "io" + "path/filepath" + "strconv" + "strings" + "unsafe" + + cpu "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + mem "github.com/shirou/gopsutil/v4/mem" + net "github.com/shirou/gopsutil/v4/net" + "golang.org/x/sys/unix" +) + +func pidsWithContext(ctx context.Context) ([]int32, error) { + var ret []int32 + procs, err := ProcessesWithContext(ctx) + if err != nil { + return ret, nil + } + + for _, p := range procs { + ret = append(ret, p.Pid) + } + + return ret, nil +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + + return k.Ppid, nil +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + name := common.IntToString(k.Comm[:]) + + if len(name) >= 15 { + cmdlineSlice, err := p.CmdlineSliceWithContext(ctx) + if err != nil { + return "", err + } + if len(cmdlineSlice) > 0 { + extendedName := filepath.Base(cmdlineSlice[0]) + if strings.HasPrefix(extendedName, p.name) { + name = extendedName + } + } + } + + return name, nil +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + mib := []int32{CTLKern, KernProcArgs, p.Pid, KernProcArgv} + buf, _, err := common.CallSyscall(mib) + if err != nil { + return nil, err + } + + /* From man sysctl(2): + The buffer pointed to by oldp is filled with an array of char + pointers followed by the strings themselves. The last char + pointer is a NULL pointer. */ + var strParts []string + r := bytes.NewReader(buf) + baseAddr := uintptr(unsafe.Pointer(&buf[0])) + for { + argvp, err := readPtr(r) + if err != nil { + return nil, err + } + if argvp == 0 { // check for a NULL pointer + break + } + offset := argvp - baseAddr + length := uintptr(bytes.IndexByte(buf[offset:], 0)) + str := string(buf[offset : offset+length]) + strParts = append(strParts, str) + } + + return strParts, nil +} + +// readPtr reads a pointer data from a given reader. WARNING: only little +// endian architectures are supported. 
+func readPtr(r io.Reader) (uintptr, error) { + switch sizeofPtr { + case 4: + var p uint32 + if err := binary.Read(r, binary.LittleEndian, &p); err != nil { + return 0, err + } + return uintptr(p), nil + case 8: + var p uint64 + if err := binary.Read(r, binary.LittleEndian, &p); err != nil { + return 0, err + } + return uintptr(p), nil + default: + return 0, fmt.Errorf("unsupported pointer size") + } +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + argv, err := p.CmdlineSliceWithContext(ctx) + if err != nil { + return "", err + } + return strings.Join(argv, " "), nil +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + k, err := p.getKProc() + if err != nil { + return []string{""}, err + } + var s string + switch k.Stat { + case SIDL: + case SRUN: + case SONPROC: + s = Running + case SSLEEP: + s = Sleep + case SSTOP: + s = Stop + case SDEAD: + s = Zombie + } + + return []string{s}, nil +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + // see https://github.com/shirou/gopsutil/issues/596#issuecomment-432707831 for implementation details + pid := p.Pid + out, err := invoke.CommandWithContext(ctx, "ps", "-o", "stat=", "-p", strconv.Itoa(int(pid))) + if err != nil { + return false, err + } + return strings.IndexByte(string(out), '+') != -1, nil +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + uids := make([]uint32, 0, 3) + + uids = append(uids, uint32(k.Ruid), uint32(k.Uid), uint32(k.Svuid)) + + return uids, nil +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + gids := make([]uint32, 0, 3) + gids = append(gids, uint32(k.Rgid), uint32(k.Ngroups), uint32(k.Svgid)) + + return gids, nil +} + +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + + groups := make([]uint32, k.Ngroups) + for i := int16(0); i < k.Ngroups; i++ { + groups[i] = uint32(k.Groups[i]) + } + + return groups, nil +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + k, err := p.getKProc() + if err != nil { + return "", err + } + + ttyNr := uint64(k.Tdev) + + termmap, err := getTerminalMap() + if err != nil { + return "", err + } + + return termmap[ttyNr], nil +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + k, err := p.getKProc() + if err != nil { + return 0, err + } + return int32(k.Nice), nil +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &IOCountersStat{ + ReadCount: uint64(k.Uru_inblock), + WriteCount: uint64(k.Uru_oublock), + }, nil +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + /* not supported, just return 1 */ + return 1, nil +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + return &cpu.TimesStat{ + CPU: "cpu", + User: float64(k.Uutime_sec) + float64(k.Uutime_usec)/1000000, + System: float64(k.Ustime_sec) + float64(k.Ustime_usec)/1000000, + }, nil +} + +func (p *Process) 
MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + k, err := p.getKProc() + if err != nil { + return nil, err + } + pageSize, err := mem.GetPageSizeWithContext(ctx) + if err != nil { + return nil, err + } + + return &MemoryInfoStat{ + RSS: uint64(k.Vm_rssize) * pageSize, + VMS: uint64(k.Vm_tsize) + uint64(k.Vm_dsize) + + uint64(k.Vm_ssize), + }, nil +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + pids, err := common.CallPgrepWithContext(ctx, invoke, p.Pid) + if err != nil { + return nil, err + } + ret := make([]*Process, 0, len(pids)) + for _, pid := range pids { + np, err := NewProcessWithContext(ctx, pid) + if err != nil { + return nil, err + } + ret = append(ret, np) + } + return ret, nil +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + results := []*Process{} + + buf, length, err := callKernProcSyscall(KernProcAll, 0) + if err != nil { + return results, err + } + + // get kinfo_proc size + count := int(length / uint64(sizeOfKinfoProc)) + + // parse buf to procs + for i := 0; i < count; i++ { + b := buf[i*sizeOfKinfoProc : (i+1)*sizeOfKinfoProc] + k, err := parseKinfoProc(b) + if err != nil { + continue + } + p, err := NewProcessWithContext(ctx, int32(k.Pid)) + if err != nil { + continue + } + + results = append(results, p) + } + + return results, nil +} + +func (p *Process) getKProc() (*KinfoProc, error) { + buf, length, err := callKernProcSyscall(KernProcPID, p.Pid) + if err != nil { + return nil, err + } + if length != sizeOfKinfoProc { + return nil, err + } + + k, err := parseKinfoProc(buf) + if err != nil { + return nil, err + } + return &k, nil +} + +func callKernProcSyscall(op int32, arg int32) ([]byte, uint64, error) { + mib := []int32{CTLKern, KernProc, op, arg, sizeOfKinfoProc, 0} + mibptr := unsafe.Pointer(&mib[0]) + miblen := uint64(len(mib)) + length := uint64(0) + _, _, err := unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + 0, + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return nil, length, err + } + + count := int32(length / uint64(sizeOfKinfoProc)) + mib = []int32{CTLKern, KernProc, op, arg, sizeOfKinfoProc, count} + mibptr = unsafe.Pointer(&mib[0]) + miblen = uint64(len(mib)) + // get proc info itself + buf := make([]byte, length) + _, _, err = unix.Syscall6( + unix.SYS___SYSCTL, + uintptr(mibptr), + uintptr(miblen), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&length)), + 0, + 0) + if err != 0 { + return buf, length, err + } + + return buf, length, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go new file mode 100644 index 0000000000000..e3c5c2b5a1dfb --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_386.go @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd && 386 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs process/types_openbsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 66 + KernProcAll = 0 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 55 + KernProcArgv = 1 + KernProcEnv = 3 +) + +const ( + ArgMax = 256 * 1024 +) + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x38 + sizeOfKinfoProc = 0x264 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SDEAD = 6 + SONPROC = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int32 +} + +type Timeval struct { + Sec int64 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type KinfoProc struct { + Forw uint64 + Back uint64 + Paddr uint64 + Addr uint64 + Fd uint64 + Stats uint64 + Limit uint64 + Vmspace uint64 + Sigacts uint64 + Sess uint64 + Tsess uint64 + Ru uint64 + Eflag int32 + Exitsig int32 + Flag int32 + Pid int32 + Ppid int32 + Sid int32 + X_pgid int32 + Tpgid int32 + Uid uint32 + Ruid uint32 + Gid uint32 + Rgid uint32 + Groups [16]uint32 + Ngroups int16 + Jobc int16 + Tdev uint32 + Estcpu uint32 + Rtime_sec uint32 + Rtime_usec uint32 + Cpticks int32 + Pctcpu uint32 + Swtime uint32 + Slptime uint32 + Schedflags int32 + Uticks uint64 + Sticks uint64 + Iticks uint64 + Tracep uint64 + Traceflag int32 + Holdcnt int32 + Siglist int32 + Sigmask uint32 + Sigignore uint32 + Sigcatch uint32 + Stat int8 + Priority uint8 + Usrpri uint8 + Nice uint8 + Xstat uint16 + Acflag uint16 + Comm [24]int8 + Wmesg [8]int8 + Wchan uint64 + Login [32]int8 + Vm_rssize int32 + Vm_tsize int32 + Vm_dsize int32 + Vm_ssize int32 + Uvalid int64 + Ustart_sec uint64 + Ustart_usec uint32 + Uutime_sec uint32 + Uutime_usec uint32 + Ustime_sec uint32 + Ustime_usec uint32 + Uru_maxrss uint64 + Uru_ixrss uint64 + Uru_idrss uint64 + Uru_isrss uint64 + Uru_minflt uint64 + Uru_majflt uint64 + Uru_nswap uint64 + Uru_inblock uint64 + Uru_oublock uint64 + Uru_msgsnd uint64 + Uru_msgrcv uint64 + Uru_nsignals uint64 + Uru_nvcsw uint64 + Uru_nivcsw uint64 + Uctime_sec uint32 + Uctime_usec uint32 + Psflags int32 + Spare int32 + Svuid uint32 + Svgid uint32 + Emul [8]int8 + Rlim_rss_cur uint64 + Cpuid uint64 + Vm_map_size uint64 + Tid int32 + Rtableid uint32 +} + +type Priority struct{} + +type KinfoVmentry struct { + Start uint32 + End uint32 + Guard uint32 + Fspace uint32 + Fspace_augment uint32 + Offset uint64 + Wired_count int32 + Etype int32 + Protection int32 + Max_protection int32 + Advice int32 + Inheritance int32 + Flags uint8 + Pad_cgo_0 [3]byte +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go new file mode 100644 index 0000000000000..beb7c9b0b4daf --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_amd64.go @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs types_openbsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 66 + KernProcAll = 0 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 55 + 
KernProcArgv = 1 + KernProcEnv = 3 +) + +const ( + ArgMax = 256 * 1024 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x50 + sizeOfKinfoProc = 0x268 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SDEAD = 6 + SONPROC = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type KinfoProc struct { + Forw uint64 + Back uint64 + Paddr uint64 + Addr uint64 + Fd uint64 + Stats uint64 + Limit uint64 + Vmspace uint64 + Sigacts uint64 + Sess uint64 + Tsess uint64 + Ru uint64 + Eflag int32 + Exitsig int32 + Flag int32 + Pid int32 + Ppid int32 + Sid int32 + X_pgid int32 + Tpgid int32 + Uid uint32 + Ruid uint32 + Gid uint32 + Rgid uint32 + Groups [16]uint32 + Ngroups int16 + Jobc int16 + Tdev uint32 + Estcpu uint32 + Rtime_sec uint32 + Rtime_usec uint32 + Cpticks int32 + Pctcpu uint32 + Swtime uint32 + Slptime uint32 + Schedflags int32 + Uticks uint64 + Sticks uint64 + Iticks uint64 + Tracep uint64 + Traceflag int32 + Holdcnt int32 + Siglist int32 + Sigmask uint32 + Sigignore uint32 + Sigcatch uint32 + Stat int8 + Priority uint8 + Usrpri uint8 + Nice uint8 + Xstat uint16 + Acflag uint16 + Comm [24]int8 + Wmesg [8]int8 + Wchan uint64 + Login [32]int8 + Vm_rssize int32 + Vm_tsize int32 + Vm_dsize int32 + Vm_ssize int32 + Uvalid int64 + Ustart_sec uint64 + Ustart_usec uint32 + Uutime_sec uint32 + Uutime_usec uint32 + Ustime_sec uint32 + Ustime_usec uint32 + Pad_cgo_0 [4]byte + Uru_maxrss uint64 + Uru_ixrss uint64 + Uru_idrss uint64 + Uru_isrss uint64 + Uru_minflt uint64 + Uru_majflt uint64 + Uru_nswap uint64 + Uru_inblock uint64 + Uru_oublock uint64 + Uru_msgsnd uint64 + Uru_msgrcv uint64 + Uru_nsignals uint64 + Uru_nvcsw uint64 + Uru_nivcsw uint64 + Uctime_sec uint32 + Uctime_usec uint32 + Psflags int32 + Spare int32 + Svuid uint32 + Svgid uint32 + Emul [8]int8 + Rlim_rss_cur uint64 + Cpuid uint64 + Vm_map_size uint64 + Tid int32 + Rtableid uint32 +} + +type Priority struct{} + +type KinfoVmentry struct { + Start uint64 + End uint64 + Guard uint64 + Fspace uint64 + Fspace_augment uint64 + Offset uint64 + Wired_count int32 + Etype int32 + Protection int32 + Max_protection int32 + Advice int32 + Inheritance int32 + Flags uint8 + Pad_cgo_0 [7]byte +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go new file mode 100644 index 0000000000000..ff082f43f87df --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm.go @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd && arm + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs process/types_openbsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 66 + KernProcAll = 0 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 55 + KernProcArgv = 1 + KernProcEnv = 3 +) + +const ( + ArgMax = 256 * 1024 +) + +const ( + sizeofPtr = 0x4 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x4 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x38 + sizeOfKinfoProc = 0x264 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SDEAD = 6 + SONPROC = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int32 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int32 +} + +type Timeval struct { + Sec int64 + Usec int32 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int32 + Ixrss int32 + Idrss int32 + Isrss int32 + Minflt int32 + Majflt int32 + Nswap int32 + Inblock int32 + Oublock int32 + Msgsnd int32 + Msgrcv int32 + Nsignals int32 + Nvcsw int32 + Nivcsw int32 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type KinfoProc struct { + Forw uint64 + Back uint64 + Paddr uint64 + Addr uint64 + Fd uint64 + Stats uint64 + Limit uint64 + Vmspace uint64 + Sigacts uint64 + Sess uint64 + Tsess uint64 + Ru uint64 + Eflag int32 + Exitsig int32 + Flag int32 + Pid int32 + Ppid int32 + Sid int32 + X_pgid int32 + Tpgid int32 + Uid uint32 + Ruid uint32 + Gid uint32 + Rgid uint32 + Groups [16]uint32 + Ngroups int16 + Jobc int16 + Tdev uint32 + Estcpu uint32 + Rtime_sec uint32 + Rtime_usec uint32 + Cpticks int32 + Pctcpu uint32 + Swtime uint32 + Slptime uint32 + Schedflags int32 + Uticks uint64 + Sticks uint64 + Iticks uint64 + Tracep uint64 + Traceflag int32 + Holdcnt int32 + Siglist int32 + Sigmask uint32 + Sigignore uint32 + Sigcatch uint32 + Stat int8 + Priority uint8 + Usrpri uint8 + Nice uint8 + Xstat uint16 + Acflag uint16 + Comm [24]int8 + Wmesg [8]int8 + Wchan uint64 + Login [32]int8 + Vm_rssize int32 + Vm_tsize int32 + Vm_dsize int32 + Vm_ssize int32 + Uvalid int64 + Ustart_sec uint64 + Ustart_usec uint32 + Uutime_sec uint32 + Uutime_usec uint32 + Ustime_sec uint32 + Ustime_usec uint32 + Uru_maxrss uint64 + Uru_ixrss uint64 + Uru_idrss uint64 + Uru_isrss uint64 + Uru_minflt uint64 + Uru_majflt uint64 + Uru_nswap uint64 + Uru_inblock uint64 + Uru_oublock uint64 + Uru_msgsnd uint64 + Uru_msgrcv uint64 + Uru_nsignals uint64 + Uru_nvcsw uint64 + Uru_nivcsw uint64 + Uctime_sec uint32 + Uctime_usec uint32 + Psflags int32 + Spare int32 + Svuid uint32 + Svgid uint32 + Emul [8]int8 + Rlim_rss_cur uint64 + Cpuid uint64 + Vm_map_size uint64 + Tid int32 + Rtableid uint32 +} + +type Priority struct{} + +type KinfoVmentry struct { + Start uint32 + End uint32 + Guard uint32 + Fspace uint32 + Fspace_augment uint32 + Offset uint64 + Wired_count int32 + Etype int32 + Protection int32 + Max_protection int32 + Advice int32 + Inheritance int32 + Flags uint8 + Pad_cgo_0 [3]byte +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go new file mode 100644 index 0000000000000..e180ba359959d --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_arm64.go @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd && arm64 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs process/types_openbsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 66 + KernProcAll = 0 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 55 + KernProcArgv = 1 + KernProcEnv = 3 +) + +const ( + ArgMax = 256 * 1024 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x50 + sizeOfKinfoProc = 0x270 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SDEAD = 6 + SONPROC = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type KinfoProc struct { + Forw uint64 + Back uint64 + Paddr uint64 + Addr uint64 + Fd uint64 + Stats uint64 + Limit uint64 + Vmspace uint64 + Sigacts uint64 + Sess uint64 + Tsess uint64 + Ru uint64 + Eflag int32 + Exitsig int32 + Flag int32 + Pid int32 + Ppid int32 + Sid int32 + X_pgid int32 + Tpgid int32 + Uid uint32 + Ruid uint32 + Gid uint32 + Rgid uint32 + Groups [16]uint32 + Ngroups int16 + Jobc int16 + Tdev uint32 + Estcpu uint32 + Rtime_sec uint32 + Rtime_usec uint32 + Cpticks int32 + Pctcpu uint32 + Swtime uint32 + Slptime uint32 + Schedflags int32 + Uticks uint64 + Sticks uint64 + Iticks uint64 + Tracep uint64 + Traceflag int32 + Holdcnt int32 + Siglist int32 + Sigmask uint32 + Sigignore uint32 + Sigcatch uint32 + Stat int8 + Priority uint8 + Usrpri uint8 + Nice uint8 + Xstat uint16 + Acflag uint16 + Comm [24]int8 + Wmesg [8]uint8 + Wchan uint64 + Login [32]uint8 + Vm_rssize int32 + Vm_tsize int32 + Vm_dsize int32 + Vm_ssize int32 + Uvalid int64 + Ustart_sec uint64 + Ustart_usec uint32 + Uutime_sec uint32 + Uutime_usec uint32 + Ustime_sec uint32 + Ustime_usec uint32 + Uru_maxrss uint64 + Uru_ixrss uint64 + Uru_idrss uint64 + Uru_isrss uint64 + Uru_minflt uint64 + Uru_majflt uint64 + Uru_nswap uint64 + Uru_inblock uint64 + Uru_oublock uint64 + Uru_msgsnd uint64 + Uru_msgrcv uint64 + Uru_nsignals uint64 + Uru_nvcsw uint64 + Uru_nivcsw uint64 + Uctime_sec uint32 + Uctime_usec uint32 + Psflags uint32 + Spare int32 + Svuid uint32 + Svgid uint32 + Emul [8]uint8 + Rlim_rss_cur uint64 + Cpuid uint64 + Vm_map_size uint64 + Tid int32 + Rtableid uint32 + Pledge uint64 +} + +type Priority struct{} + +type KinfoVmentry struct { + Start uint64 + End uint64 + Guard uint64 + Fspace uint64 + Fspace_augment uint64 + Offset uint64 + Wired_count int32 + Etype int32 + Protection int32 + Max_protection int32 + Advice int32 + Inheritance int32 + Flags uint8 + Pad_cgo_0 [7]byte +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go new file mode 100644 index 0000000000000..c53924b6f8241 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_openbsd_riscv64.go @@ -0,0 +1,204 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build openbsd && riscv64 + +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs process/types_openbsd.go + +package process + +const ( + CTLKern = 1 + KernProc = 66 + KernProcAll = 0 + KernProcPID = 1 + KernProcProc = 8 + KernProcPathname = 12 + KernProcArgs = 55 + KernProcArgv = 1 + KernProcEnv = 3 +) + +const ( + ArgMax = 256 * 1024 +) + +const ( + sizeofPtr = 0x8 + sizeofShort = 0x2 + sizeofInt = 0x4 + sizeofLong = 0x8 + sizeofLongLong = 0x8 +) + +const ( + sizeOfKinfoVmentry = 0x50 + sizeOfKinfoProc = 0x288 +) + +const ( + SIDL = 1 + SRUN = 2 + SSLEEP = 3 + SSTOP = 4 + SZOMB = 5 + SDEAD = 6 + SONPROC = 7 +) + +type ( + _C_short int16 + _C_int int32 + _C_long int64 + _C_long_long int64 +) + +type Timespec struct { + Sec int64 + Nsec int64 +} + +type Timeval struct { + Sec int64 + Usec int64 +} + +type Rusage struct { + Utime Timeval + Stime Timeval + Maxrss int64 + Ixrss int64 + Idrss int64 + Isrss int64 + Minflt int64 + Majflt int64 + Nswap int64 + Inblock int64 + Oublock int64 + Msgsnd int64 + Msgrcv int64 + Nsignals int64 + Nvcsw int64 + Nivcsw int64 +} + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type KinfoProc struct { + Forw uint64 + Back uint64 + Paddr uint64 + Addr uint64 + Fd uint64 + Stats uint64 + Limit uint64 + Vmspace uint64 + Sigacts uint64 + Sess uint64 + Tsess uint64 + Ru uint64 + Eflag int32 + Exitsig int32 + Flag int32 + Pid int32 + Ppid int32 + Sid int32 + X_pgid int32 + Tpgid int32 + Uid uint32 + Ruid uint32 + Gid uint32 + Rgid uint32 + Groups [16]uint32 + Ngroups int16 + Jobc int16 + Tdev uint32 + Estcpu uint32 + Rtime_sec uint32 + Rtime_usec uint32 + Cpticks int32 + Pctcpu uint32 + Swtime uint32 + Slptime uint32 + Schedflags int32 + Uticks uint64 + Sticks uint64 + Iticks uint64 + Tracep uint64 + Traceflag int32 + Holdcnt int32 + Siglist int32 + Sigmask uint32 + Sigignore uint32 + Sigcatch uint32 + Stat int8 + Priority uint8 + Usrpri uint8 + Nice uint8 + Xstat uint16 + Spare uint16 + Comm [24]int8 + Wmesg [8]uint8 + Wchan uint64 + Login [32]uint8 + Vm_rssize int32 + Vm_tsize int32 + Vm_dsize int32 + Vm_ssize int32 + Uvalid int64 + Ustart_sec uint64 + Ustart_usec uint32 + Uutime_sec uint32 + Uutime_usec uint32 + Ustime_sec uint32 + Ustime_usec uint32 + Uru_maxrss uint64 + Uru_ixrss uint64 + Uru_idrss uint64 + Uru_isrss uint64 + Uru_minflt uint64 + Uru_majflt uint64 + Uru_nswap uint64 + Uru_inblock uint64 + Uru_oublock uint64 + Uru_msgsnd uint64 + Uru_msgrcv uint64 + Uru_nsignals uint64 + Uru_nvcsw uint64 + Uru_nivcsw uint64 + Uctime_sec uint32 + Uctime_usec uint32 + Psflags uint32 + Acflag uint32 + Svuid uint32 + Svgid uint32 + Emul [8]uint8 + Rlim_rss_cur uint64 + Cpuid uint64 + Vm_map_size uint64 + Tid int32 + Rtableid uint32 + Pledge uint64 + Name [24]uint8 +} + +type Priority struct{} + +type KinfoVmentry struct { + Start uint64 + End uint64 + Guard uint64 + Fspace uint64 + Fspace_augment uint64 + Offset uint64 + Wired_count int32 + Etype int32 + Protection int32 + Max_protection int32 + Advice int32 + Inheritance int32 + Flags uint8 + Pad_cgo_0 [7]byte +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go new file mode 100644 index 0000000000000..726758cae9582 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_plan9.go @@ -0,0 +1,203 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build plan9 + +package process + +import ( + "context" + "syscall" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +type Signal = 
syscall.Note + +type MemoryMapsStat struct { + Path string `json:"path"` + Rss uint64 `json:"rss"` + Size uint64 `json:"size"` + Pss uint64 `json:"pss"` + SharedClean uint64 `json:"sharedClean"` + SharedDirty uint64 `json:"sharedDirty"` + PrivateClean uint64 `json:"privateClean"` + PrivateDirty uint64 `json:"privateDirty"` + Referenced uint64 `json:"referenced"` + Anonymous uint64 `json:"anonymous"` + Swap uint64 `json:"swap"` +} + +type MemoryInfoExStat struct{} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + return []string{""}, common.ErrNotImplementedError +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) 
NumThreadsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig Signal) error { + return common.ErrNotImplementedError +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) TerminateWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) KillWithContext(ctx context.Context) error { + return common.ErrNotImplementedError +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go new file mode 100644 index 0000000000000..caa9d3f7c03f4 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_posix.go @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build linux || freebsd || openbsd || darwin || solaris + +package process + +import ( + "context" + "errors" + "fmt" + "os" + "os/user" + "path/filepath" + "strconv" + "strings" + "syscall" + + "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v4/internal/common" +) + +type Signal = syscall.Signal + +// POSIX +func getTerminalMap() (map[uint64]string, error) { + ret := make(map[uint64]string) + var termfiles []string + + d, err := os.Open("/dev") + if err != nil { + return nil, err + } + defer d.Close() + + devnames, err := d.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, devname := range devnames { + if strings.HasPrefix(devname, "/dev/tty") { + termfiles = append(termfiles, "/dev/tty/"+devname) + } + 
} + + var ptsnames []string + ptsd, err := os.Open("/dev/pts") + if err != nil { + ptsnames, _ = filepath.Glob("/dev/ttyp*") + if ptsnames == nil { + return nil, err + } + } + defer ptsd.Close() + + if ptsnames == nil { + defer ptsd.Close() + ptsnames, err = ptsd.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, ptsname := range ptsnames { + termfiles = append(termfiles, "/dev/pts/"+ptsname) + } + } else { + termfiles = ptsnames + } + + for _, name := range termfiles { + stat := unix.Stat_t{} + if err = unix.Stat(name, &stat); err != nil { + return nil, err + } + rdev := uint64(stat.Rdev) + ret[rdev] = strings.Replace(name, "/dev", "", -1) + } + return ret, nil +} + +// isMount is a port of python's os.path.ismount() +// https://github.com/python/cpython/blob/08ff4369afca84587b1c82034af4e9f64caddbf2/Lib/posixpath.py#L186-L216 +// https://docs.python.org/3/library/os.path.html#os.path.ismount +func isMount(path string) bool { + // Check symlinkness with os.Lstat; unix.DT_LNK is not portable + fileInfo, err := os.Lstat(path) + if err != nil { + return false + } + if fileInfo.Mode()&os.ModeSymlink != 0 { + return false + } + var stat1 unix.Stat_t + if err := unix.Lstat(path, &stat1); err != nil { + return false + } + parent := filepath.Join(path, "..") + var stat2 unix.Stat_t + if err := unix.Lstat(parent, &stat2); err != nil { + return false + } + return stat1.Dev != stat2.Dev || stat1.Ino == stat2.Ino +} + +func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { + if pid <= 0 { + return false, fmt.Errorf("invalid pid %v", pid) + } + proc, err := os.FindProcess(int(pid)) + if err != nil { + return false, err + } + + if isMount(common.HostProcWithContext(ctx)) { // if //proc exists and is mounted, check if //proc/ folder exists + _, err := os.Stat(common.HostProcWithContext(ctx, strconv.Itoa(int(pid)))) + if os.IsNotExist(err) { + return false, nil + } + return err == nil, err + } + + // procfs does not exist or is not mounted, check PID existence by signalling the pid + err = proc.Signal(syscall.Signal(0)) + if err == nil { + return true, nil + } + if errors.Is(err, os.ErrProcessDone) { + return false, nil + } + var errno syscall.Errno + if !errors.As(err, &errno) { + return false, err + } + switch errno { + case syscall.ESRCH: + return false, nil + case syscall.EPERM: + return true, nil + } + + return false, err +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { + process, err := os.FindProcess(int(p.Pid)) + if err != nil { + return err + } + + err = process.Signal(sig) + if err != nil { + return err + } + + return nil +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + return p.SendSignalWithContext(ctx, unix.SIGSTOP) +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + return p.SendSignalWithContext(ctx, unix.SIGCONT) +} + +func (p *Process) TerminateWithContext(ctx context.Context) error { + return p.SendSignalWithContext(ctx, unix.SIGTERM) +} + +func (p *Process) KillWithContext(ctx context.Context) error { + return p.SendSignalWithContext(ctx, unix.SIGKILL) +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + uids, err := p.UidsWithContext(ctx) + if err != nil { + return "", err + } + if len(uids) > 0 { + u, err := user.LookupId(strconv.Itoa(int(uids[0]))) + if err != nil { + return "", err + } + return u.Username, nil + } + return "", nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go 
b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go new file mode 100644 index 0000000000000..04f86f16b5e1a --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_solaris.go @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: BSD-3-Clause +package process + +import ( + "bytes" + "context" + "os" + "strconv" + "strings" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" +) + +type MemoryMapsStat struct { + Path string `json:"path"` + Rss uint64 `json:"rss"` + Size uint64 `json:"size"` + Pss uint64 `json:"pss"` + SharedClean uint64 `json:"sharedClean"` + SharedDirty uint64 `json:"sharedDirty"` + PrivateClean uint64 `json:"privateClean"` + PrivateDirty uint64 `json:"privateDirty"` + Referenced uint64 `json:"referenced"` + Anonymous uint64 `json:"anonymous"` + Swap uint64 `json:"swap"` +} + +type MemoryInfoExStat struct{} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + return readPidsFromDir(common.HostProcWithContext(ctx)) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, err + } + + for _, pid := range pids { + p, err := NewProcessWithContext(ctx, pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, error) { + exe, err := p.fillFromPathAOutWithContext(ctx) + if os.IsNotExist(err) { + exe, err = p.fillFromExecnameWithContext(ctx) + } + return exe, err +} + +func (p *Process) CmdlineWithContext(ctx context.Context) (string, error) { + return p.fillFromCmdlineWithContext(ctx) +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + return p.fillSliceFromCmdlineWithContext(ctx) +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) CwdWithContext(ctx context.Context) (string, error) { + return p.fillFromPathCwdWithContext(ctx) +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + return []string{""}, common.ErrNotImplementedError +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) RlimitWithContext(ctx context.Context) 
([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + _, fnames, err := p.fillFromfdListWithContext(ctx) + return int32(len(fnames)), err +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ThreadsWithContext(ctx context.Context) (map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { + return nil, common.ErrNotImplementedError +} + +/** +** Internal functions +**/ + +func (p *Process) fillFromfdListWithContext(ctx context.Context) (string, []string, error) { + pid := p.Pid + statPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "fd") + d, err := os.Open(statPath) + if err != nil { + return statPath, []string{}, err + } + defer d.Close() + fnames, err := d.Readdirnames(-1) + return statPath, fnames, err +} + +func (p *Process) fillFromPathCwdWithContext(ctx context.Context) (string, error) { + pid := p.Pid + cwdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "path", "cwd") + cwd, err := os.Readlink(cwdPath) + if err != nil { + return "", err + } + return cwd, nil +} + +func (p *Process) fillFromPathAOutWithContext(ctx context.Context) (string, error) { + pid := p.Pid + cwdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "path", "a.out") + exe, err := os.Readlink(cwdPath) + if err != nil { + return "", err + } + return exe, nil +} + +func (p *Process) fillFromExecnameWithContext(ctx context.Context) (string, error) { + pid := p.Pid + execNamePath 
:= common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "execname") + exe, err := os.ReadFile(execNamePath) + if err != nil { + return "", err + } + return string(exe), nil +} + +func (p *Process) fillFromCmdlineWithContext(ctx context.Context) (string, error) { + pid := p.Pid + cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") + cmdline, err := os.ReadFile(cmdPath) + if err != nil { + return "", err + } + ret := strings.FieldsFunc(string(cmdline), func(r rune) bool { + if r == '\u0000' { + return true + } + return false + }) + + return strings.Join(ret, " "), nil +} + +func (p *Process) fillSliceFromCmdlineWithContext(ctx context.Context) ([]string, error) { + pid := p.Pid + cmdPath := common.HostProcWithContext(ctx, strconv.Itoa(int(pid)), "cmdline") + cmdline, err := os.ReadFile(cmdPath) + if err != nil { + return nil, err + } + if len(cmdline) == 0 { + return nil, nil + } + if cmdline[len(cmdline)-1] == 0 { + cmdline = cmdline[:len(cmdline)-1] + } + parts := bytes.Split(cmdline, []byte{0}) + var strParts []string + for _, p := range parts { + strParts = append(strParts, string(p)) + } + + return strParts, nil +} + +func readPidsFromDir(path string) ([]int32, error) { + var ret []int32 + + d, err := os.Open(path) + if err != nil { + return nil, err + } + defer d.Close() + + fnames, err := d.Readdirnames(-1) + if err != nil { + return nil, err + } + for _, fname := range fnames { + pid, err := strconv.ParseInt(fname, 10, 32) + if err != nil { + // if not numeric name, just skip + continue + } + ret = append(ret, int32(pid)) + } + + return ret, nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go new file mode 100644 index 0000000000000..f3111649a6c7c --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows.go @@ -0,0 +1,1165 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build windows + +package process + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "strings" + "syscall" + "time" + "unicode/utf16" + "unsafe" + + "github.com/shirou/gopsutil/v4/cpu" + "github.com/shirou/gopsutil/v4/internal/common" + "github.com/shirou/gopsutil/v4/net" + "golang.org/x/sys/windows" +) + +type Signal = syscall.Signal + +var ( + modntdll = windows.NewLazySystemDLL("ntdll.dll") + procNtResumeProcess = modntdll.NewProc("NtResumeProcess") + procNtSuspendProcess = modntdll.NewProc("NtSuspendProcess") + + modpsapi = windows.NewLazySystemDLL("psapi.dll") + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessImageFileNameW = modpsapi.NewProc("GetProcessImageFileNameW") + + advapi32 = windows.NewLazySystemDLL("advapi32.dll") + procLookupPrivilegeValue = advapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = advapi32.NewProc("AdjustTokenPrivileges") + + procQueryFullProcessImageNameW = common.Modkernel32.NewProc("QueryFullProcessImageNameW") + procGetPriorityClass = common.Modkernel32.NewProc("GetPriorityClass") + procGetProcessIoCounters = common.Modkernel32.NewProc("GetProcessIoCounters") + procGetNativeSystemInfo = common.Modkernel32.NewProc("GetNativeSystemInfo") + + processorArchitecture uint +) + +const processQueryInformation = windows.PROCESS_QUERY_LIMITED_INFORMATION + +type systemProcessorInformation struct { + ProcessorArchitecture uint16 + ProcessorLevel uint16 + ProcessorRevision uint16 + Reserved uint16 + ProcessorFeatureBits uint16 +} + +type 
systemInfo struct { + wProcessorArchitecture uint16 + wReserved uint16 + dwpageSize uint32 + lpMinimumApplicationAddress uintptr + lpMaximumApplicationAddress uintptr + dwActiveProcessorMask uintptr + dwNumberOfProcessors uint32 + dwProcessorType uint32 + dwAllocationGranularity uint32 + wProcessorLevel uint16 + wProcessorRevision uint16 +} + +// Memory_info_ex is different between OSes +type MemoryInfoExStat struct{} + +type MemoryMapsStat struct{} + +// ioCounters is an equivalent representation of IO_COUNTERS in the Windows API. +// https://docs.microsoft.com/windows/win32/api/winnt/ns-winnt-io_counters +type ioCounters struct { + ReadOperationCount uint64 + WriteOperationCount uint64 + OtherOperationCount uint64 + ReadTransferCount uint64 + WriteTransferCount uint64 + OtherTransferCount uint64 +} + +type processBasicInformation32 struct { + Reserved1 uint32 + PebBaseAddress uint32 + Reserved2 uint32 + Reserved3 uint32 + UniqueProcessId uint32 + Reserved4 uint32 +} + +type processBasicInformation64 struct { + Reserved1 uint64 + PebBaseAddress uint64 + Reserved2 uint64 + Reserved3 uint64 + UniqueProcessId uint64 + Reserved4 uint64 +} + +type processEnvironmentBlock32 struct { + Reserved1 [2]uint8 + BeingDebugged uint8 + Reserved2 uint8 + Reserved3 [2]uint32 + Ldr uint32 + ProcessParameters uint32 + // More fields which we don't use so far +} + +type processEnvironmentBlock64 struct { + Reserved1 [2]uint8 + BeingDebugged uint8 + Reserved2 uint8 + _ [4]uint8 // padding, since we are 64 bit, the next pointer is 64 bit aligned (when compiling for 32 bit, this is not the case without manual padding) + Reserved3 [2]uint64 + Ldr uint64 + ProcessParameters uint64 + // More fields which we don't use so far +} + +type rtlUserProcessParameters32 struct { + Reserved1 [16]uint8 + ConsoleHandle uint32 + ConsoleFlags uint32 + StdInputHandle uint32 + StdOutputHandle uint32 + StdErrorHandle uint32 + CurrentDirectoryPathNameLength uint16 + _ uint16 // Max Length + CurrentDirectoryPathAddress uint32 + CurrentDirectoryHandle uint32 + DllPathNameLength uint16 + _ uint16 // Max Length + DllPathAddress uint32 + ImagePathNameLength uint16 + _ uint16 // Max Length + ImagePathAddress uint32 + CommandLineLength uint16 + _ uint16 // Max Length + CommandLineAddress uint32 + EnvironmentAddress uint32 + // More fields which we don't use so far +} + +type rtlUserProcessParameters64 struct { + Reserved1 [16]uint8 + ConsoleHandle uint64 + ConsoleFlags uint64 + StdInputHandle uint64 + StdOutputHandle uint64 + StdErrorHandle uint64 + CurrentDirectoryPathNameLength uint16 + _ uint16 // Max Length + _ uint32 // Padding + CurrentDirectoryPathAddress uint64 + CurrentDirectoryHandle uint64 + DllPathNameLength uint16 + _ uint16 // Max Length + _ uint32 // Padding + DllPathAddress uint64 + ImagePathNameLength uint16 + _ uint16 // Max Length + _ uint32 // Padding + ImagePathAddress uint64 + CommandLineLength uint16 + _ uint16 // Max Length + _ uint32 // Padding + CommandLineAddress uint64 + EnvironmentAddress uint64 + // More fields which we don't use so far +} + +type winLUID struct { + LowPart winDWord + HighPart winLong +} + +// LUID_AND_ATTRIBUTES +type winLUIDAndAttributes struct { + Luid winLUID + Attributes winDWord +} + +// TOKEN_PRIVILEGES +type winTokenPrivileges struct { + PrivilegeCount winDWord + Privileges [1]winLUIDAndAttributes +} + +type ( + winLong int32 + winDWord uint32 +) + +func init() { + var systemInfo systemInfo + + procGetNativeSystemInfo.Call(uintptr(unsafe.Pointer(&systemInfo))) + processorArchitecture 
= uint(systemInfo.wProcessorArchitecture) + + // enable SeDebugPrivilege https://github.com/midstar/proci/blob/6ec79f57b90ba3d9efa2a7b16ef9c9369d4be875/proci_windows.go#L80-L119 + handle, err := syscall.GetCurrentProcess() + if err != nil { + return + } + + var token syscall.Token + err = syscall.OpenProcessToken(handle, 0x0028, &token) + if err != nil { + return + } + defer token.Close() + + tokenPrivileges := winTokenPrivileges{PrivilegeCount: 1} + lpName := syscall.StringToUTF16("SeDebugPrivilege") + ret, _, _ := procLookupPrivilegeValue.Call( + 0, + uintptr(unsafe.Pointer(&lpName[0])), + uintptr(unsafe.Pointer(&tokenPrivileges.Privileges[0].Luid))) + if ret == 0 { + return + } + + tokenPrivileges.Privileges[0].Attributes = 0x00000002 // SE_PRIVILEGE_ENABLED + + procAdjustTokenPrivileges.Call( + uintptr(token), + 0, + uintptr(unsafe.Pointer(&tokenPrivileges)), + uintptr(unsafe.Sizeof(tokenPrivileges)), + 0, + 0) +} + +func pidsWithContext(ctx context.Context) ([]int32, error) { + // inspired by https://gist.github.com/henkman/3083408 + // and https://github.com/giampaolo/psutil/blob/1c3a15f637521ba5c0031283da39c733fda53e4c/psutil/arch/windows/process_info.c#L315-L329 + var ret []int32 + var read uint32 = 0 + var psSize uint32 = 1024 + const dwordSize uint32 = 4 + + for { + ps := make([]uint32, psSize) + if err := windows.EnumProcesses(ps, &read); err != nil { + return nil, err + } + if uint32(len(ps)) == read/dwordSize { // ps buffer was too small to host every results, retry with a bigger one + psSize += 1024 + continue + } + for _, pid := range ps[:read/dwordSize] { + ret = append(ret, int32(pid)) + } + return ret, nil + + } +} + +func PidExistsWithContext(ctx context.Context, pid int32) (bool, error) { + if pid == 0 { // special case for pid 0 System Idle Process + return true, nil + } + if pid < 0 { + return false, fmt.Errorf("invalid pid %v", pid) + } + if pid%4 != 0 { + // OpenProcess will succeed even on non-existing pid here https://devblogs.microsoft.com/oldnewthing/20080606-00/?p=22043 + // so we list every pid just to be sure and be future-proof + pids, err := PidsWithContext(ctx) + if err != nil { + return false, err + } + for _, i := range pids { + if i == pid { + return true, err + } + } + return false, err + } + h, err := windows.OpenProcess(windows.SYNCHRONIZE, false, uint32(pid)) + if err == windows.ERROR_ACCESS_DENIED { + return true, nil + } + if err == windows.ERROR_INVALID_PARAMETER { + return false, nil + } + if err != nil { + return false, err + } + defer windows.CloseHandle(h) + event, err := windows.WaitForSingleObject(h, 0) + return event == uint32(windows.WAIT_TIMEOUT), err +} + +func (p *Process) PpidWithContext(ctx context.Context) (int32, error) { + // if cached already, return from cache + cachedPpid := p.getPpid() + if cachedPpid != 0 { + return cachedPpid, nil + } + + ppid, _, _, err := getFromSnapProcess(p.Pid) + if err != nil { + return 0, err + } + + // no errors and not cached already, so cache it + p.setPpid(ppid) + + return ppid, nil +} + +func (p *Process) NameWithContext(ctx context.Context) (string, error) { + if p.Pid == 0 { + return "System Idle Process", nil + } + if p.Pid == 4 { + return "System", nil + } + + exe, err := p.ExeWithContext(ctx) + if err != nil { + return "", fmt.Errorf("could not get Name: %s", err) + } + + return filepath.Base(exe), nil +} + +func (p *Process) TgidWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) ExeWithContext(ctx context.Context) (string, 
error) { + c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) + if err != nil { + return "", err + } + defer windows.CloseHandle(c) + buf := make([]uint16, syscall.MAX_LONG_PATH) + size := uint32(syscall.MAX_LONG_PATH) + if err := procQueryFullProcessImageNameW.Find(); err == nil { // Vista+ + ret, _, err := procQueryFullProcessImageNameW.Call( + uintptr(c), + uintptr(0), + uintptr(unsafe.Pointer(&buf[0])), + uintptr(unsafe.Pointer(&size))) + if ret == 0 { + return "", err + } + return windows.UTF16ToString(buf[:]), nil + } + // XP fallback + ret, _, err := procGetProcessImageFileNameW.Call(uintptr(c), uintptr(unsafe.Pointer(&buf[0])), uintptr(size)) + if ret == 0 { + return "", err + } + return common.ConvertDOSPath(windows.UTF16ToString(buf[:])), nil +} + +func (p *Process) CmdlineWithContext(_ context.Context) (string, error) { + cmdline, err := getProcessCommandLine(p.Pid) + if err != nil { + return "", fmt.Errorf("could not get CommandLine: %s", err) + } + return cmdline, nil +} + +func (p *Process) CmdlineSliceWithContext(ctx context.Context) ([]string, error) { + cmdline, err := p.CmdlineWithContext(ctx) + if err != nil { + return nil, err + } + return strings.Split(cmdline, " "), nil +} + +func (p *Process) createTimeWithContext(ctx context.Context) (int64, error) { + ru, err := getRusage(p.Pid) + if err != nil { + return 0, fmt.Errorf("could not get CreationDate: %s", err) + } + + return ru.CreationTime.Nanoseconds() / 1000000, nil +} + +func (p *Process) CwdWithContext(_ context.Context) (string, error) { + h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(p.Pid)) + if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + return "", nil + } + if err != nil { + return "", err + } + defer syscall.CloseHandle(syscall.Handle(h)) + + procIs32Bits := is32BitProcess(h) + + if procIs32Bits { + userProcParams, err := getUserProcessParams32(h) + if err != nil { + return "", err + } + if userProcParams.CurrentDirectoryPathNameLength > 0 { + cwd := readProcessMemory(syscall.Handle(h), procIs32Bits, uint64(userProcParams.CurrentDirectoryPathAddress), uint(userProcParams.CurrentDirectoryPathNameLength)) + if len(cwd) != int(userProcParams.CurrentDirectoryPathNameLength) { + return "", errors.New("cannot read current working directory") + } + + return convertUTF16ToString(cwd), nil + } + } else { + userProcParams, err := getUserProcessParams64(h) + if err != nil { + return "", err + } + if userProcParams.CurrentDirectoryPathNameLength > 0 { + cwd := readProcessMemory(syscall.Handle(h), procIs32Bits, userProcParams.CurrentDirectoryPathAddress, uint(userProcParams.CurrentDirectoryPathNameLength)) + if len(cwd) != int(userProcParams.CurrentDirectoryPathNameLength) { + return "", errors.New("cannot read current working directory") + } + + return convertUTF16ToString(cwd), nil + } + } + + // if we reach here, we have no cwd + return "", nil +} + +func (p *Process) StatusWithContext(ctx context.Context) ([]string, error) { + return []string{""}, common.ErrNotImplementedError +} + +func (p *Process) ForegroundWithContext(ctx context.Context) (bool, error) { + return false, common.ErrNotImplementedError +} + +func (p *Process) UsernameWithContext(ctx context.Context) (string, error) { + pid := p.Pid + c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) + if err != nil { + return "", err + } + defer windows.CloseHandle(c) + + var token syscall.Token + err = 
syscall.OpenProcessToken(syscall.Handle(c), syscall.TOKEN_QUERY, &token) + if err != nil { + return "", err + } + defer token.Close() + tokenUser, err := token.GetTokenUser() + if err != nil { + return "", err + } + + user, domain, _, err := tokenUser.User.Sid.LookupAccount("") + return domain + "\\" + user, err +} + +func (p *Process) UidsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GidsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) GroupsWithContext(ctx context.Context) ([]uint32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TerminalWithContext(ctx context.Context) (string, error) { + return "", common.ErrNotImplementedError +} + +// priorityClasses maps a win32 priority class to its WMI equivalent Win32_Process.Priority +// https://docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getpriorityclass +// https://docs.microsoft.com/en-us/windows/desktop/cimwin32prov/win32-process +var priorityClasses = map[int]int32{ + 0x00008000: 10, // ABOVE_NORMAL_PRIORITY_CLASS + 0x00004000: 6, // BELOW_NORMAL_PRIORITY_CLASS + 0x00000080: 13, // HIGH_PRIORITY_CLASS + 0x00000040: 4, // IDLE_PRIORITY_CLASS + 0x00000020: 8, // NORMAL_PRIORITY_CLASS + 0x00000100: 24, // REALTIME_PRIORITY_CLASS +} + +func (p *Process) NiceWithContext(ctx context.Context) (int32, error) { + c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) + if err != nil { + return 0, err + } + defer windows.CloseHandle(c) + ret, _, err := procGetPriorityClass.Call(uintptr(c)) + if ret == 0 { + return 0, err + } + priority, ok := priorityClasses[int(ret)] + if !ok { + return 0, fmt.Errorf("unknown priority class %v", ret) + } + return priority, nil +} + +func (p *Process) IOniceWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) RlimitWithContext(ctx context.Context) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) RlimitUsageWithContext(ctx context.Context, gatherUsed bool) ([]RlimitStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) IOCountersWithContext(ctx context.Context) (*IOCountersStat, error) { + c, err := windows.OpenProcess(processQueryInformation, false, uint32(p.Pid)) + if err != nil { + return nil, err + } + defer windows.CloseHandle(c) + var ioCounters ioCounters + ret, _, err := procGetProcessIoCounters.Call(uintptr(c), uintptr(unsafe.Pointer(&ioCounters))) + if ret == 0 { + return nil, err + } + stats := &IOCountersStat{ + ReadCount: ioCounters.ReadOperationCount, + ReadBytes: ioCounters.ReadTransferCount, + WriteCount: ioCounters.WriteOperationCount, + WriteBytes: ioCounters.WriteTransferCount, + } + + return stats, nil +} + +func (p *Process) NumCtxSwitchesWithContext(ctx context.Context) (*NumCtxSwitchesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) NumFDsWithContext(ctx context.Context) (int32, error) { + return 0, common.ErrNotImplementedError +} + +func (p *Process) NumThreadsWithContext(ctx context.Context) (int32, error) { + ppid, ret, _, err := getFromSnapProcess(p.Pid) + if err != nil { + return 0, err + } + + // if no errors and not cached already, cache ppid + p.parent = ppid + if 0 == p.getPpid() { + p.setPpid(ppid) + } + + return ret, nil +} + +func (p *Process) ThreadsWithContext(ctx context.Context) 
(map[int32]*cpu.TimesStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) TimesWithContext(ctx context.Context) (*cpu.TimesStat, error) { + sysTimes, err := getProcessCPUTimes(p.Pid) + if err != nil { + return nil, err + } + + // User and kernel times are represented as a FILETIME structure + // which contains a 64-bit value representing the number of + // 100-nanosecond intervals since January 1, 1601 (UTC): + // http://msdn.microsoft.com/en-us/library/ms724284(VS.85).aspx + // To convert it into a float representing the seconds that the + // process has executed in user/kernel mode I borrowed the code + // below from psutil's _psutil_windows.c, and in turn from Python's + // Modules/posixmodule.c + + user := float64(sysTimes.UserTime.HighDateTime)*429.4967296 + float64(sysTimes.UserTime.LowDateTime)*1e-7 + kernel := float64(sysTimes.KernelTime.HighDateTime)*429.4967296 + float64(sysTimes.KernelTime.LowDateTime)*1e-7 + + return &cpu.TimesStat{ + User: user, + System: kernel, + }, nil +} + +func (p *Process) CPUAffinityWithContext(ctx context.Context) ([]int32, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryInfoWithContext(ctx context.Context) (*MemoryInfoStat, error) { + mem, err := getMemoryInfo(p.Pid) + if err != nil { + return nil, err + } + + ret := &MemoryInfoStat{ + RSS: uint64(mem.WorkingSetSize), + VMS: uint64(mem.PagefileUsage), + } + + return ret, nil +} + +func (p *Process) MemoryInfoExWithContext(ctx context.Context) (*MemoryInfoExStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) PageFaultsWithContext(ctx context.Context) (*PageFaultsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) ChildrenWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(0)) + if err != nil { + return out, err + } + defer windows.CloseHandle(snap) + var pe32 windows.ProcessEntry32 + pe32.Size = uint32(unsafe.Sizeof(pe32)) + if err := windows.Process32First(snap, &pe32); err != nil { + return out, err + } + for { + if pe32.ParentProcessID == uint32(p.Pid) { + p, err := NewProcessWithContext(ctx, int32(pe32.ProcessID)) + if err == nil { + out = append(out, p) + } + } + if err = windows.Process32Next(snap, &pe32); err != nil { + break + } + } + return out, nil +} + +func (p *Process) OpenFilesWithContext(ctx context.Context) ([]OpenFilesStat, error) { + files := make([]OpenFilesStat, 0) + fileExists := make(map[string]bool) + + process, err := windows.OpenProcess(common.ProcessQueryInformation, false, uint32(p.Pid)) + if err != nil { + return nil, err + } + + buffer := make([]byte, 1024) + var size uint32 + + st := common.CallWithExpandingBuffer( + func() common.NtStatus { + return common.NtQuerySystemInformation( + common.SystemExtendedHandleInformationClass, + &buffer[0], + uint32(len(buffer)), + &size, + ) + }, + &buffer, + &size, + ) + if st.IsError() { + return nil, st.Error() + } + + handlesList := (*common.SystemExtendedHandleInformation)(unsafe.Pointer(&buffer[0])) + handles := make([]common.SystemExtendedHandleTableEntryInformation, int(handlesList.NumberOfHandles)) + hdr := (*reflect.SliceHeader)(unsafe.Pointer(&handles)) + hdr.Data = uintptr(unsafe.Pointer(&handlesList.Handles[0])) + + currentProcess, err := windows.GetCurrentProcess() + if err != nil { + return nil, err + } + + for _, handle := range handles { + var file uintptr + if 
int32(handle.UniqueProcessId) != p.Pid { + continue + } + if windows.DuplicateHandle(process, windows.Handle(handle.HandleValue), currentProcess, (*windows.Handle)(&file), + 0, true, windows.DUPLICATE_SAME_ACCESS) != nil { + continue + } + // release the new handle + defer windows.CloseHandle(windows.Handle(file)) + + fileType, err := windows.GetFileType(windows.Handle(file)) + if err != nil || fileType != windows.FILE_TYPE_DISK { + continue + } + + var fileName string + ch := make(chan struct{}) + + go func() { + var buf [syscall.MAX_LONG_PATH]uint16 + n, err := windows.GetFinalPathNameByHandle(windows.Handle(file), &buf[0], syscall.MAX_LONG_PATH, 0) + if err != nil { + return + } + + fileName = string(utf16.Decode(buf[:n])) + ch <- struct{}{} + }() + + select { + case <-time.NewTimer(100 * time.Millisecond).C: + continue + case <-ch: + fileInfo, err := os.Stat(fileName) + if err != nil || fileInfo.IsDir() { + continue + } + + if _, exists := fileExists[fileName]; !exists { + files = append(files, OpenFilesStat{ + Path: fileName, + Fd: uint64(file), + }) + fileExists[fileName] = true + } + case <-ctx.Done(): + return files, ctx.Err() + } + } + + return files, nil +} + +func (p *Process) ConnectionsWithContext(ctx context.Context) ([]net.ConnectionStat, error) { + return net.ConnectionsPidWithContext(ctx, "all", p.Pid) +} + +func (p *Process) ConnectionsMaxWithContext(ctx context.Context, max int) ([]net.ConnectionStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) MemoryMapsWithContext(ctx context.Context, grouped bool) (*[]MemoryMapsStat, error) { + return nil, common.ErrNotImplementedError +} + +func (p *Process) SendSignalWithContext(ctx context.Context, sig syscall.Signal) error { + return common.ErrNotImplementedError +} + +func (p *Process) SuspendWithContext(ctx context.Context) error { + c, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid)) + if err != nil { + return err + } + defer windows.CloseHandle(c) + + r1, _, _ := procNtSuspendProcess.Call(uintptr(c)) + if r1 != 0 { + // See https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/596a1078-e883-4972-9bbc-49e60bebca55 + return fmt.Errorf("NtStatus='0x%.8X'", r1) + } + + return nil +} + +func (p *Process) ResumeWithContext(ctx context.Context) error { + c, err := windows.OpenProcess(windows.PROCESS_SUSPEND_RESUME, false, uint32(p.Pid)) + if err != nil { + return err + } + defer windows.CloseHandle(c) + + r1, _, _ := procNtResumeProcess.Call(uintptr(c)) + if r1 != 0 { + // See https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-erref/596a1078-e883-4972-9bbc-49e60bebca55 + return fmt.Errorf("NtStatus='0x%.8X'", r1) + } + + return nil +} + +func (p *Process) TerminateWithContext(ctx context.Context) error { + proc, err := windows.OpenProcess(windows.PROCESS_TERMINATE, false, uint32(p.Pid)) + if err != nil { + return err + } + err = windows.TerminateProcess(proc, 0) + windows.CloseHandle(proc) + return err +} + +func (p *Process) KillWithContext(ctx context.Context) error { + process, err := os.FindProcess(int(p.Pid)) + if err != nil { + return err + } + return process.Kill() +} + +func (p *Process) EnvironWithContext(ctx context.Context) ([]string, error) { + envVars, err := getProcessEnvironmentVariables(p.Pid, ctx) + if err != nil { + return nil, fmt.Errorf("could not get environment variables: %s", err) + } + return envVars, nil +} + +// retrieve Ppid in a thread-safe manner +func (p *Process) getPpid() int32 { + p.parentMutex.RLock() + 
defer p.parentMutex.RUnlock() + return p.parent +} + +// cache Ppid in a thread-safe manner (WINDOWS ONLY) +// see https://psutil.readthedocs.io/en/latest/#psutil.Process.ppid +func (p *Process) setPpid(ppid int32) { + p.parentMutex.Lock() + defer p.parentMutex.Unlock() + p.parent = ppid +} + +func getFromSnapProcess(pid int32) (int32, int32, string, error) { + snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(pid)) + if err != nil { + return 0, 0, "", err + } + defer windows.CloseHandle(snap) + var pe32 windows.ProcessEntry32 + pe32.Size = uint32(unsafe.Sizeof(pe32)) + if err = windows.Process32First(snap, &pe32); err != nil { + return 0, 0, "", err + } + for { + if pe32.ProcessID == uint32(pid) { + szexe := windows.UTF16ToString(pe32.ExeFile[:]) + return int32(pe32.ParentProcessID), int32(pe32.Threads), szexe, nil + } + if err = windows.Process32Next(snap, &pe32); err != nil { + break + } + } + return 0, 0, "", fmt.Errorf("couldn't find pid: %d", pid) +} + +func ProcessesWithContext(ctx context.Context) ([]*Process, error) { + out := []*Process{} + + pids, err := PidsWithContext(ctx) + if err != nil { + return out, fmt.Errorf("could not get Processes %s", err) + } + + for _, pid := range pids { + p, err := NewProcessWithContext(ctx, pid) + if err != nil { + continue + } + out = append(out, p) + } + + return out, nil +} + +func getRusage(pid int32) (*windows.Rusage, error) { + var CPU windows.Rusage + + c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) + if err != nil { + return nil, err + } + defer windows.CloseHandle(c) + + if err := windows.GetProcessTimes(c, &CPU.CreationTime, &CPU.ExitTime, &CPU.KernelTime, &CPU.UserTime); err != nil { + return nil, err + } + + return &CPU, nil +} + +func getMemoryInfo(pid int32) (PROCESS_MEMORY_COUNTERS, error) { + var mem PROCESS_MEMORY_COUNTERS + c, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) + if err != nil { + return mem, err + } + defer windows.CloseHandle(c) + if err := getProcessMemoryInfo(c, &mem); err != nil { + return mem, err + } + + return mem, err +} + +func getProcessMemoryInfo(h windows.Handle, mem *PROCESS_MEMORY_COUNTERS) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessMemoryInfo.Addr(), 3, uintptr(h), uintptr(unsafe.Pointer(mem)), uintptr(unsafe.Sizeof(*mem))) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +type SYSTEM_TIMES struct { + CreateTime syscall.Filetime + ExitTime syscall.Filetime + KernelTime syscall.Filetime + UserTime syscall.Filetime +} + +func getProcessCPUTimes(pid int32) (SYSTEM_TIMES, error) { + var times SYSTEM_TIMES + + h, err := windows.OpenProcess(processQueryInformation, false, uint32(pid)) + if err != nil { + return times, err + } + defer windows.CloseHandle(h) + + err = syscall.GetProcessTimes( + syscall.Handle(h), + ×.CreateTime, + ×.ExitTime, + ×.KernelTime, + ×.UserTime, + ) + + return times, err +} + +func getUserProcessParams32(handle windows.Handle) (rtlUserProcessParameters32, error) { + pebAddress, err := queryPebAddress(syscall.Handle(handle), true) + if err != nil { + return rtlUserProcessParameters32{}, fmt.Errorf("cannot locate process PEB: %w", err) + } + + buf := readProcessMemory(syscall.Handle(handle), true, pebAddress, uint(unsafe.Sizeof(processEnvironmentBlock32{}))) + if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock32{})) { + return rtlUserProcessParameters32{}, fmt.Errorf("cannot read process PEB") + } + peb := 
(*processEnvironmentBlock32)(unsafe.Pointer(&buf[0])) + userProcessAddress := uint64(peb.ProcessParameters) + buf = readProcessMemory(syscall.Handle(handle), true, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters32{}))) + if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters32{})) { + return rtlUserProcessParameters32{}, fmt.Errorf("cannot read user process parameters") + } + return *(*rtlUserProcessParameters32)(unsafe.Pointer(&buf[0])), nil +} + +func getUserProcessParams64(handle windows.Handle) (rtlUserProcessParameters64, error) { + pebAddress, err := queryPebAddress(syscall.Handle(handle), false) + if err != nil { + return rtlUserProcessParameters64{}, fmt.Errorf("cannot locate process PEB: %w", err) + } + + buf := readProcessMemory(syscall.Handle(handle), false, pebAddress, uint(unsafe.Sizeof(processEnvironmentBlock64{}))) + if len(buf) != int(unsafe.Sizeof(processEnvironmentBlock64{})) { + return rtlUserProcessParameters64{}, fmt.Errorf("cannot read process PEB") + } + peb := (*processEnvironmentBlock64)(unsafe.Pointer(&buf[0])) + userProcessAddress := peb.ProcessParameters + buf = readProcessMemory(syscall.Handle(handle), false, userProcessAddress, uint(unsafe.Sizeof(rtlUserProcessParameters64{}))) + if len(buf) != int(unsafe.Sizeof(rtlUserProcessParameters64{})) { + return rtlUserProcessParameters64{}, fmt.Errorf("cannot read user process parameters") + } + return *(*rtlUserProcessParameters64)(unsafe.Pointer(&buf[0])), nil +} + +func is32BitProcess(h windows.Handle) bool { + const ( + PROCESSOR_ARCHITECTURE_INTEL = 0 + PROCESSOR_ARCHITECTURE_ARM = 5 + PROCESSOR_ARCHITECTURE_ARM64 = 12 + PROCESSOR_ARCHITECTURE_IA64 = 6 + PROCESSOR_ARCHITECTURE_AMD64 = 9 + ) + + var procIs32Bits bool + switch processorArchitecture { + case PROCESSOR_ARCHITECTURE_INTEL, PROCESSOR_ARCHITECTURE_ARM: + procIs32Bits = true + case PROCESSOR_ARCHITECTURE_ARM64, PROCESSOR_ARCHITECTURE_IA64, PROCESSOR_ARCHITECTURE_AMD64: + var wow64 uint + + ret, _, _ := common.ProcNtQueryInformationProcess.Call( + uintptr(h), + uintptr(common.ProcessWow64Information), + uintptr(unsafe.Pointer(&wow64)), + uintptr(unsafe.Sizeof(wow64)), + uintptr(0), + ) + if int(ret) >= 0 { + if wow64 != 0 { + procIs32Bits = true + } + } else { + // if the OS does not support the call, we fallback into the bitness of the app + if unsafe.Sizeof(wow64) == 4 { + procIs32Bits = true + } + } + + default: + // for other unknown platforms, we rely on process platform + if unsafe.Sizeof(processorArchitecture) == 8 { + procIs32Bits = false + } else { + procIs32Bits = true + } + } + return procIs32Bits +} + +func getProcessEnvironmentVariables(pid int32, ctx context.Context) ([]string, error) { + h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid)) + if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + return nil, nil + } + if err != nil { + return nil, err + } + defer syscall.CloseHandle(syscall.Handle(h)) + + procIs32Bits := is32BitProcess(h) + + var processParameterBlockAddress uint64 + + if procIs32Bits { + peb, err := getUserProcessParams32(h) + if err != nil { + return nil, err + } + processParameterBlockAddress = uint64(peb.EnvironmentAddress) + } else { + peb, err := getUserProcessParams64(h) + if err != nil { + return nil, err + } + processParameterBlockAddress = peb.EnvironmentAddress + } + envvarScanner := bufio.NewScanner(&processReader{ + processHandle: h, + is32BitProcess: procIs32Bits, + offset: processParameterBlockAddress, + }) + 
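+	// The environment block read from the target process is a contiguous
+	// sequence of NUL-terminated UTF-16 strings, terminated by an empty
+	// string; the custom split function below emits one variable per
+	// UTF-16 NUL, and the scan loop stops at the first zero-length token.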
envvarScanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + // Check for UTF-16 zero character + for i := 0; i < len(data)-1; i += 2 { + if data[i] == 0 && data[i+1] == 0 { + return i + 2, data[0:i], nil + } + } + if atEOF { + return len(data), data, nil + } + // Request more data + return 0, nil, nil + }) + var envVars []string + for envvarScanner.Scan() { + entry := envvarScanner.Bytes() + if len(entry) == 0 { + break // Block is finished + } + envVars = append(envVars, convertUTF16ToString(entry)) + select { + case <-ctx.Done(): + break + default: + continue + } + } + if err := envvarScanner.Err(); err != nil { + return nil, err + } + return envVars, nil +} + +type processReader struct { + processHandle windows.Handle + is32BitProcess bool + offset uint64 +} + +func (p *processReader) Read(buf []byte) (int, error) { + processMemory := readProcessMemory(syscall.Handle(p.processHandle), p.is32BitProcess, p.offset, uint(len(buf))) + if len(processMemory) == 0 { + return 0, io.EOF + } + copy(buf, processMemory) + p.offset += uint64(len(processMemory)) + return len(processMemory), nil +} + +func getProcessCommandLine(pid int32) (string, error) { + h, err := windows.OpenProcess(processQueryInformation|windows.PROCESS_VM_READ, false, uint32(pid)) + if err == windows.ERROR_ACCESS_DENIED || err == windows.ERROR_INVALID_PARAMETER { + return "", nil + } + if err != nil { + return "", err + } + defer syscall.CloseHandle(syscall.Handle(h)) + + procIs32Bits := is32BitProcess(h) + + if procIs32Bits { + userProcParams, err := getUserProcessParams32(h) + if err != nil { + return "", err + } + if userProcParams.CommandLineLength > 0 { + cmdLine := readProcessMemory(syscall.Handle(h), procIs32Bits, uint64(userProcParams.CommandLineAddress), uint(userProcParams.CommandLineLength)) + if len(cmdLine) != int(userProcParams.CommandLineLength) { + return "", errors.New("cannot read cmdline") + } + + return convertUTF16ToString(cmdLine), nil + } + } else { + userProcParams, err := getUserProcessParams64(h) + if err != nil { + return "", err + } + if userProcParams.CommandLineLength > 0 { + cmdLine := readProcessMemory(syscall.Handle(h), procIs32Bits, userProcParams.CommandLineAddress, uint(userProcParams.CommandLineLength)) + if len(cmdLine) != int(userProcParams.CommandLineLength) { + return "", errors.New("cannot read cmdline") + } + + return convertUTF16ToString(cmdLine), nil + } + } + + // if we reach here, we have no command line + return "", nil +} + +func convertUTF16ToString(src []byte) string { + srcLen := len(src) / 2 + + codePoints := make([]uint16, srcLen) + + srcIdx := 0 + for i := 0; i < srcLen; i++ { + codePoints[i] = uint16(src[srcIdx]) | uint16(src[srcIdx+1])<<8 + srcIdx += 2 + } + return syscall.UTF16ToString(codePoints) +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go new file mode 100644 index 0000000000000..2b231c79d04ac --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_32bit.go @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build (windows && 386) || (windows && arm) + +package process + +import ( + "errors" + "syscall" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" + "golang.org/x/sys/windows" +) + +type PROCESS_MEMORY_COUNTERS struct { + CB uint32 + PageFaultCount uint32 + PeakWorkingSetSize uint32 + WorkingSetSize uint32 + 
QuotaPeakPagedPoolUsage uint32 + QuotaPagedPoolUsage uint32 + QuotaPeakNonPagedPoolUsage uint32 + QuotaNonPagedPoolUsage uint32 + PagefileUsage uint32 + PeakPagefileUsage uint32 +} + +func queryPebAddress(procHandle syscall.Handle, is32BitProcess bool) (uint64, error) { + if is32BitProcess { + // we are on a 32-bit process reading an external 32-bit process + var info processBasicInformation32 + + ret, _, _ := common.ProcNtQueryInformationProcess.Call( + uintptr(procHandle), + uintptr(common.ProcessBasicInformation), + uintptr(unsafe.Pointer(&info)), + uintptr(unsafe.Sizeof(info)), + uintptr(0), + ) + if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { + return uint64(info.PebBaseAddress), nil + } else { + return 0, windows.NTStatus(ret) + } + } else { + // we are on a 32-bit process reading an external 64-bit process + if common.ProcNtWow64QueryInformationProcess64.Find() == nil { // avoid panic + var info processBasicInformation64 + + ret, _, _ := common.ProcNtWow64QueryInformationProcess64.Call( + uintptr(procHandle), + uintptr(common.ProcessBasicInformation), + uintptr(unsafe.Pointer(&info)), + uintptr(unsafe.Sizeof(info)), + uintptr(0), + ) + if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { + return info.PebBaseAddress, nil + } else { + return 0, windows.NTStatus(ret) + } + } else { + return 0, errors.New("can't find API to query 64 bit process from 32 bit") + } + } +} + +func readProcessMemory(h syscall.Handle, is32BitProcess bool, address uint64, size uint) []byte { + if is32BitProcess { + var read uint + + buffer := make([]byte, size) + + ret, _, _ := common.ProcNtReadVirtualMemory.Call( + uintptr(h), + uintptr(address), + uintptr(unsafe.Pointer(&buffer[0])), + uintptr(size), + uintptr(unsafe.Pointer(&read)), + ) + if int(ret) >= 0 && read > 0 { + return buffer[:read] + } + } else { + // reading a 64-bit process from a 32-bit one + if common.ProcNtWow64ReadVirtualMemory64.Find() == nil { // avoid panic + var read uint64 + + buffer := make([]byte, size) + + ret, _, _ := common.ProcNtWow64ReadVirtualMemory64.Call( + uintptr(h), + uintptr(address&0xFFFFFFFF), // the call expects a 64-bit value + uintptr(address>>32), + uintptr(unsafe.Pointer(&buffer[0])), + uintptr(size), // the call expects a 64-bit value + uintptr(0), // but size is 32-bit so pass zero as the high dword + uintptr(unsafe.Pointer(&read)), + ) + if int(ret) >= 0 && read > 0 { + return buffer[:uint(read)] + } + } + } + + // if we reach here, an error happened + return nil +} diff --git a/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go new file mode 100644 index 0000000000000..befe521390056 --- /dev/null +++ b/vendor/github.com/shirou/gopsutil/v4/process/process_windows_64bit.go @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: BSD-3-Clause +//go:build (windows && amd64) || (windows && arm64) + +package process + +import ( + "syscall" + "unsafe" + + "github.com/shirou/gopsutil/v4/internal/common" + "golang.org/x/sys/windows" +) + +type PROCESS_MEMORY_COUNTERS struct { + CB uint32 + PageFaultCount uint32 + PeakWorkingSetSize uint64 + WorkingSetSize uint64 + QuotaPeakPagedPoolUsage uint64 + QuotaPagedPoolUsage uint64 + QuotaPeakNonPagedPoolUsage uint64 + QuotaNonPagedPoolUsage uint64 + PagefileUsage uint64 + PeakPagefileUsage uint64 +} + +func queryPebAddress(procHandle syscall.Handle, is32BitProcess bool) (uint64, error) { + if is32BitProcess { + // we are on a 64-bit process reading an 
external 32-bit process + var wow64 uint + + ret, _, _ := common.ProcNtQueryInformationProcess.Call( + uintptr(procHandle), + uintptr(common.ProcessWow64Information), + uintptr(unsafe.Pointer(&wow64)), + uintptr(unsafe.Sizeof(wow64)), + uintptr(0), + ) + if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { + return uint64(wow64), nil + } else { + return 0, windows.NTStatus(ret) + } + } else { + // we are on a 64-bit process reading an external 64-bit process + var info processBasicInformation64 + + ret, _, _ := common.ProcNtQueryInformationProcess.Call( + uintptr(procHandle), + uintptr(common.ProcessBasicInformation), + uintptr(unsafe.Pointer(&info)), + uintptr(unsafe.Sizeof(info)), + uintptr(0), + ) + if status := windows.NTStatus(ret); status == windows.STATUS_SUCCESS { + return info.PebBaseAddress, nil + } else { + return 0, windows.NTStatus(ret) + } + } +} + +func readProcessMemory(procHandle syscall.Handle, _ bool, address uint64, size uint) []byte { + var read uint + + buffer := make([]byte, size) + + ret, _, _ := common.ProcNtReadVirtualMemory.Call( + uintptr(procHandle), + uintptr(address), + uintptr(unsafe.Pointer(&buffer[0])), + uintptr(size), + uintptr(unsafe.Pointer(&read)), + ) + if int(ret) >= 0 && read > 0 { + return buffer[:read] + } + return nil +} diff --git a/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml b/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml new file mode 100644 index 0000000000000..dc6fefb979ec0 --- /dev/null +++ b/vendor/github.com/shoenig/go-m1cpu/.golangci.yaml @@ -0,0 +1,12 @@ +run: + timeout: 5m +linters: + enable: + - gofmt + - errcheck + - errname + - errorlint + - bodyclose + - durationcheck + - whitespace + diff --git a/vendor/github.com/shoenig/go-m1cpu/LICENSE b/vendor/github.com/shoenig/go-m1cpu/LICENSE new file mode 100644 index 0000000000000..e87a115e462e1 --- /dev/null +++ b/vendor/github.com/shoenig/go-m1cpu/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. 
any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/shoenig/go-m1cpu/Makefile b/vendor/github.com/shoenig/go-m1cpu/Makefile new file mode 100644 index 0000000000000..28d786397d4b4 --- /dev/null +++ b/vendor/github.com/shoenig/go-m1cpu/Makefile @@ -0,0 +1,12 @@ +SHELL = bash + +default: test + +.PHONY: test +test: + @echo "--> Running Tests ..." + @go test -v -race ./... + +vet: + @echo "--> Vet Go sources ..." + @go vet ./... diff --git a/vendor/github.com/shoenig/go-m1cpu/README.md b/vendor/github.com/shoenig/go-m1cpu/README.md new file mode 100644 index 0000000000000..399657acf861c --- /dev/null +++ b/vendor/github.com/shoenig/go-m1cpu/README.md @@ -0,0 +1,66 @@ +# m1cpu + +[![Go Reference](https://pkg.go.dev/badge/github.com/shoenig/go-m1cpu.svg)](https://pkg.go.dev/github.com/shoenig/go-m1cpu) +[![MPL License](https://img.shields.io/github/license/shoenig/go-m1cpu?color=g&style=flat-square)](https://github.com/shoenig/go-m1cpu/blob/main/LICENSE) +[![Run CI Tests](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml/badge.svg)](https://github.com/shoenig/go-m1cpu/actions/workflows/ci.yaml) + +The `go-m1cpu` module is a library for inspecting Apple Silicon CPUs in Go. + +Use the `m1cpu` Go package for looking up the CPU frequency for Apple M1 and M2 CPUs. + +# Install + +```shell +go get github.com/shoenig/go-m1cpu@latest +``` + +# CGO + +This package requires the use of [CGO](https://go.dev/blog/cgo). + +Extracting the CPU properties is done via Apple's [IOKit](https://developer.apple.com/documentation/iokit?language=objc) +framework, which is accessible only through system C libraries. + +# Example + +Simple Go program to print Apple Silicon M1/M2 CPU speeds. + +```go +package main + +import ( + "fmt" + + "github.com/shoenig/go-m1cpu" +) + +func main() { + fmt.Println("Apple Silicon", m1cpu.IsAppleSilicon()) + + fmt.Println("pCore GHz", m1cpu.PCoreGHz()) + fmt.Println("eCore GHz", m1cpu.ECoreGHz()) + + fmt.Println("pCore Hz", m1cpu.PCoreHz()) + fmt.Println("eCore Hz", m1cpu.ECoreHz()) +} +``` + +Using `go test` to print out available information. 
+ +``` +➜ go test -v -run Show +=== RUN Test_Show + cpu_test.go:42: pCore Hz 3504000000 + cpu_test.go:43: eCore Hz 2424000000 + cpu_test.go:44: pCore GHz 3.504 + cpu_test.go:45: eCore GHz 2.424 + cpu_test.go:46: pCore count 8 + cpu_test.go:47: eCoreCount 4 + cpu_test.go:50: pCore Caches 196608 131072 16777216 + cpu_test.go:53: eCore Caches 131072 65536 4194304 +--- PASS: Test_Show (0.00s) +``` + +# License + +Open source under the [MPL](LICENSE) diff --git a/vendor/github.com/shoenig/go-m1cpu/cpu.go b/vendor/github.com/shoenig/go-m1cpu/cpu.go new file mode 100644 index 0000000000000..502a8cce92e67 --- /dev/null +++ b/vendor/github.com/shoenig/go-m1cpu/cpu.go @@ -0,0 +1,213 @@ +//go:build darwin && arm64 && cgo + +package m1cpu + +// #cgo LDFLAGS: -framework CoreFoundation -framework IOKit +// #include +// #include +// #include +// #include +// +// #if !defined(MAC_OS_VERSION_12_0) || MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0 +// #define kIOMainPortDefault kIOMasterPortDefault +// #endif +// +// #define HzToGHz(hz) ((hz) / 1000000000.0) +// +// UInt64 global_pCoreHz; +// UInt64 global_eCoreHz; +// int global_pCoreCount; +// int global_eCoreCount; +// int global_pCoreL1InstCacheSize; +// int global_eCoreL1InstCacheSize; +// int global_pCoreL1DataCacheSize; +// int global_eCoreL1DataCacheSize; +// int global_pCoreL2CacheSize; +// int global_eCoreL2CacheSize; +// char global_brand[32]; +// +// UInt64 getFrequency(CFTypeRef typeRef) { +// CFDataRef cfData = typeRef; +// +// CFIndex size = CFDataGetLength(cfData); +// UInt8 buf[size]; +// CFDataGetBytes(cfData, CFRangeMake(0, size), buf); +// +// UInt8 b1 = buf[size-5]; +// UInt8 b2 = buf[size-6]; +// UInt8 b3 = buf[size-7]; +// UInt8 b4 = buf[size-8]; +// +// UInt64 pCoreHz = 0x00000000FFFFFFFF & ((b1<<24) | (b2 << 16) | (b3 << 8) | (b4)); +// return pCoreHz; +// } +// +// int sysctl_int(const char * name) { +// int value = -1; +// size_t size = 8; +// sysctlbyname(name, &value, &size, NULL, 0); +// return value; +// } +// +// void sysctl_string(const char * name, char * dest) { +// size_t size = 32; +// sysctlbyname(name, dest, &size, NULL, 0); +// } +// +// void initialize() { +// global_pCoreCount = sysctl_int("hw.perflevel0.physicalcpu"); +// global_eCoreCount = sysctl_int("hw.perflevel1.physicalcpu"); +// global_pCoreL1InstCacheSize = sysctl_int("hw.perflevel0.l1icachesize"); +// global_eCoreL1InstCacheSize = sysctl_int("hw.perflevel1.l1icachesize"); +// global_pCoreL1DataCacheSize = sysctl_int("hw.perflevel0.l1dcachesize"); +// global_eCoreL1DataCacheSize = sysctl_int("hw.perflevel1.l1dcachesize"); +// global_pCoreL2CacheSize = sysctl_int("hw.perflevel0.l2cachesize"); +// global_eCoreL2CacheSize = sysctl_int("hw.perflevel1.l2cachesize"); +// sysctl_string("machdep.cpu.brand_string", global_brand); +// +// CFMutableDictionaryRef matching = IOServiceMatching("AppleARMIODevice"); +// io_iterator_t iter; +// IOServiceGetMatchingServices(kIOMainPortDefault, matching, &iter); +// +// const size_t bufsize = 512; +// io_object_t obj; +// while ((obj = IOIteratorNext(iter))) { +// char class[bufsize]; +// IOObjectGetClass(obj, class); +// char name[bufsize]; +// IORegistryEntryGetName(obj, name); +// +// if (strncmp(name, "pmgr", bufsize) == 0) { +// CFTypeRef pCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states5-sram"), kCFAllocatorDefault, 0); +// CFTypeRef eCoreRef = IORegistryEntryCreateCFProperty(obj, CFSTR("voltage-states1-sram"), kCFAllocatorDefault, 0); +// +// long long pCoreHz = getFrequency(pCoreRef); +// long 
long eCoreHz = getFrequency(eCoreRef); +// +// global_pCoreHz = pCoreHz; +// global_eCoreHz = eCoreHz; +// return; +// } +// } +// } +// +// UInt64 eCoreHz() { +// return global_eCoreHz; +// } +// +// UInt64 pCoreHz() { +// return global_pCoreHz; +// } +// +// Float64 eCoreGHz() { +// return HzToGHz(global_eCoreHz); +// } +// +// Float64 pCoreGHz() { +// return HzToGHz(global_pCoreHz); +// } +// +// int pCoreCount() { +// return global_pCoreCount; +// } +// +// int eCoreCount() { +// return global_eCoreCount; +// } +// +// int pCoreL1InstCacheSize() { +// return global_pCoreL1InstCacheSize; +// } +// +// int pCoreL1DataCacheSize() { +// return global_pCoreL1DataCacheSize; +// } +// +// int pCoreL2CacheSize() { +// return global_pCoreL2CacheSize; +// } +// +// int eCoreL1InstCacheSize() { +// return global_eCoreL1InstCacheSize; +// } +// +// int eCoreL1DataCacheSize() { +// return global_eCoreL1DataCacheSize; +// } +// +// int eCoreL2CacheSize() { +// return global_eCoreL2CacheSize; +// } +// +// char * modelName() { +// return global_brand; +// } +import "C" + +func init() { + C.initialize() +} + +// IsAppleSilicon returns true on this platform. +func IsAppleSilicon() bool { + return true +} + +// PCoreHZ returns the max frequency in Hertz of the P-Core of an Apple Silicon CPU. +func PCoreHz() uint64 { + return uint64(C.pCoreHz()) +} + +// ECoreHZ returns the max frequency in Hertz of the E-Core of an Apple Silicon CPU. +func ECoreHz() uint64 { + return uint64(C.eCoreHz()) +} + +// PCoreGHz returns the max frequency in Gigahertz of the P-Core of an Apple Silicon CPU. +func PCoreGHz() float64 { + return float64(C.pCoreGHz()) +} + +// ECoreGHz returns the max frequency in Gigahertz of the E-Core of an Apple Silicon CPU. +func ECoreGHz() float64 { + return float64(C.eCoreGHz()) +} + +// PCoreCount returns the number of physical P (performance) cores. +func PCoreCount() int { + return int(C.pCoreCount()) +} + +// ECoreCount returns the number of physical E (efficiency) cores. +func ECoreCount() int { + return int(C.eCoreCount()) +} + +// PCoreCacheSize returns the sizes of the P (performance) core cache sizes +// in the order of +// +// - L1 instruction cache +// - L1 data cache +// - L2 cache +func PCoreCache() (int, int, int) { + return int(C.pCoreL1InstCacheSize()), + int(C.pCoreL1DataCacheSize()), + int(C.pCoreL2CacheSize()) +} + +// ECoreCacheSize returns the sizes of the E (efficiency) core cache sizes +// in the order of +// +// - L1 instruction cache +// - L1 data cache +// - L2 cache +func ECoreCache() (int, int, int) { + return int(C.eCoreL1InstCacheSize()), + int(C.eCoreL1DataCacheSize()), + int(C.eCoreL2CacheSize()) +} + +// ModelName returns the model name of the CPU. +func ModelName() string { + return C.GoString(C.modelName()) +} diff --git a/vendor/github.com/shoenig/go-m1cpu/incompatible.go b/vendor/github.com/shoenig/go-m1cpu/incompatible.go new file mode 100644 index 0000000000000..d425025aa84d5 --- /dev/null +++ b/vendor/github.com/shoenig/go-m1cpu/incompatible.go @@ -0,0 +1,53 @@ +//go:build !darwin || !arm64 || !cgo + +package m1cpu + +// IsAppleSilicon return false on this platform. 
+func IsAppleSilicon() bool { + return false +} + +// PCoreHZ requires darwin/arm64 +func PCoreHz() uint64 { + panic("m1cpu: not a darwin/arm64 system") +} + +// ECoreHZ requires darwin/arm64 +func ECoreHz() uint64 { + panic("m1cpu: not a darwin/arm64 system") +} + +// PCoreGHz requires darwin/arm64 +func PCoreGHz() float64 { + panic("m1cpu: not a darwin/arm64 system") +} + +// ECoreGHz requires darwin/arm64 +func ECoreGHz() float64 { + panic("m1cpu: not a darwin/arm64 system") +} + +// PCoreCount requires darwin/arm64 +func PCoreCount() int { + panic("m1cpu: not a darwin/arm64 system") +} + +// ECoreCount requires darwin/arm64 +func ECoreCount() int { + panic("m1cpu: not a darwin/arm64 system") +} + +// PCoreCacheSize requires darwin/arm64 +func PCoreCache() (int, int, int) { + panic("m1cpu: not a darwin/arm64 system") +} + +// ECoreCacheSize requires darwin/arm64 +func ECoreCache() (int, int, int) { + panic("m1cpu: not a darwin/arm64 system") +} + +// ModelName requires darwin/arm64 +func ModelName() string { + panic("m1cpu: not a darwin/arm64 system") +} diff --git a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml new file mode 100644 index 0000000000000..1b27f196286a0 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml @@ -0,0 +1,23 @@ +env: + CIRRUS_CLONE_DEPTH: 1 + GO_VERSION: go1.20 + +freebsd_12_task: + freebsd_instance: + image_family: freebsd-12-3 + install_script: | + pkg install -y go + GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest + bin/${GO_VERSION} download + build_script: bin/${GO_VERSION} build -v ./... + test_script: bin/${GO_VERSION} test -race ./... + +freebsd_13_task: + freebsd_instance: + image_family: freebsd-13-0 + install_script: | + pkg install -y go + GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest + bin/${GO_VERSION} download + build_script: bin/${GO_VERSION} build -v ./... + test_script: bin/${GO_VERSION} test -race ./... diff --git a/vendor/github.com/tklauser/go-sysconf/.gitignore b/vendor/github.com/tklauser/go-sysconf/.gitignore new file mode 100644 index 0000000000000..e482715909b65 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/.gitignore @@ -0,0 +1 @@ +_obj/ diff --git a/vendor/github.com/tklauser/go-sysconf/LICENSE b/vendor/github.com/tklauser/go-sysconf/LICENSE new file mode 100644 index 0000000000000..73c6b8991eb73 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2018-2022, Tobias Klauser +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/tklauser/go-sysconf/README.md b/vendor/github.com/tklauser/go-sysconf/README.md new file mode 100644 index 0000000000000..b83d5abf1d553 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/README.md @@ -0,0 +1,46 @@ +# go-sysconf + +[![Go Reference](https://pkg.go.dev/badge/github.com/tklauser/go-sysconf.svg)](https://pkg.go.dev/github.com/tklauser/go-sysconf) +[![GitHub Action Status](https://github.com/tklauser/go-sysconf/workflows/Tests/badge.svg)](https://github.com/tklauser/go-sysconf/actions?query=workflow%3ATests) + +`sysconf` for Go, without using cgo or external binaries (e.g. getconf). + +Supported operating systems: Linux, macOS, DragonflyBSD, FreeBSD, NetBSD, OpenBSD, Solaris/Illumos. + +All POSIX.1 and POSIX.2 variables are supported, see [References](#references) for a complete list. + +Additionally, the following non-standard variables are supported on some operating systems: + +| Variable | Supported on | +|---|---| +| `SC_PHYS_PAGES` | Linux, macOS, FreeBSD, NetBSD, OpenBSD, Solaris/Illumos | +| `SC_AVPHYS_PAGES` | Linux, OpenBSD, Solaris/Illumos | +| `SC_NPROCESSORS_CONF` | Linux, macOS, FreeBSD, NetBSD, OpenBSD, Solaris/Illumos | +| `SC_NPROCESSORS_ONLN` | Linux, macOS, FreeBSD, NetBSD, OpenBSD, Solaris/Illumos | +| `SC_UIO_MAXIOV` | Linux | + +## Usage + +```Go +package main + +import ( + "fmt" + + "github.com/tklauser/go-sysconf" +) + +func main() { + // get clock ticks, this will return the same as C.sysconf(C._SC_CLK_TCK) + clktck, err := sysconf.Sysconf(sysconf.SC_CLK_TCK) + if err == nil { + fmt.Printf("SC_CLK_TCK: %v\n", clktck) + } +} +``` + +## References + +* [POSIX documenation for `sysconf`](http://pubs.opengroup.org/onlinepubs/9699919799/functions/sysconf.html) +* [Linux manpage for `sysconf(3)`](http://man7.org/linux/man-pages/man3/sysconf.3.html) +* [glibc constants for `sysconf` parameters](https://www.gnu.org/software/libc/manual/html_node/Constants-for-Sysconf.html) diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf.go b/vendor/github.com/tklauser/go-sysconf/sysconf.go new file mode 100644 index 0000000000000..9d674930e5a63 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf.go @@ -0,0 +1,21 @@ +// Copyright 2018 Tobias Klauser. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package sysconf implements the sysconf(3) function and provides the +// associated SC_* constants to query system configuration values. +package sysconf + +import "errors" + +//go:generate go run mksysconf.go + +var errInvalid = errors.New("invalid parameter value") + +// Sysconf returns the value of a sysconf(3) runtime system parameter. +// The name parameter should be a SC_* constant define in this package. The +// implementation is GOOS-specific and certain SC_* constants might not be +// defined for all GOOSes. 
+func Sysconf(name int) (int64, error) { + return sysconf(name) +} diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go b/vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go new file mode 100644 index 0000000000000..7c96157bb79e6 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_bsd.go @@ -0,0 +1,38 @@ +// Copyright 2018 Tobias Klauser. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd +// +build darwin dragonfly freebsd netbsd openbsd + +package sysconf + +import "golang.org/x/sys/unix" + +func pathconf(path string, name int) int64 { + if val, err := unix.Pathconf(path, name); err == nil { + return int64(val) + } + return -1 +} + +func sysctl32(name string) int64 { + if val, err := unix.SysctlUint32(name); err == nil { + return int64(val) + } + return -1 +} + +func sysctl64(name string) int64 { + if val, err := unix.SysctlUint64(name); err == nil { + return int64(val) + } + return -1 +} + +func yesno(val int64) int64 { + if val == 0 { + return -1 + } + return val +} diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go b/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go new file mode 100644 index 0000000000000..3f5d83f692087 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go @@ -0,0 +1,296 @@ +// Copyright 2018 Tobias Klauser. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sysconf + +import ( + "strconv" + "strings" + "sync" + + "golang.org/x/sys/unix" +) + +const ( + _HOST_NAME_MAX = _MAXHOSTNAMELEN - 1 + _LOGIN_NAME_MAX = _MAXLOGNAME + _SYMLOOP_MAX = _MAXSYMLINKS +) + +var uname struct { + sync.Once + macOSMajor int +} + +// sysconf implements sysconf(4) as in the Darwin libc (derived from the FreeBSD +// libc), version 1534.81.1. +// See https://github.com/apple-oss-distributions/Libc/tree/Libc-1534.81.1. 
+func sysconf(name int) (int64, error) { + switch name { + case SC_AIO_LISTIO_MAX: + fallthrough + case SC_AIO_MAX: + return sysctl32("kern.aiomax"), nil + case SC_AIO_PRIO_DELTA_MAX: + return -1, nil + case SC_ARG_MAX: + return sysctl32("kern.argmax"), nil + case SC_ATEXIT_MAX: + return _INT_MAX, nil + case SC_CHILD_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NPROC, &rlim); err == nil { + if rlim.Cur != unix.RLIM_INFINITY { + return int64(rlim.Cur), nil + } + } + return -1, nil + case SC_CLK_TCK: + return _CLK_TCK, nil + case SC_DELAYTIMER_MAX: + return -1, nil + case SC_GETGR_R_SIZE_MAX: + return 4096, nil + case SC_GETPW_R_SIZE_MAX: + return 4096, nil + case SC_IOV_MAX: + return _IOV_MAX, nil + case SC_MQ_OPEN_MAX: + return -1, nil + case SC_MQ_PRIO_MAX: + return -1, nil + case SC_NGROUPS_MAX: + return sysctl32("kern.ngroups"), nil + case SC_OPEN_MAX, SC_STREAM_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { + return -1, nil + } + if rlim.Cur > unix.RLIM_INFINITY { + return -1, nil + } + if rlim.Cur > _LONG_MAX { + return -1, unix.EOVERFLOW + } + return int64(rlim.Cur), nil + case SC_RTSIG_MAX: + return -1, nil + case SC_SEM_NSEMS_MAX: + return sysctl32("kern.sysv.semmns"), nil + case SC_SEM_VALUE_MAX: + return _POSIX_SEM_VALUE_MAX, nil + case SC_SIGQUEUE_MAX: + return -1, nil + case SC_THREAD_DESTRUCTOR_ITERATIONS: + return _PTHREAD_DESTRUCTOR_ITERATIONS, nil + case SC_THREAD_KEYS_MAX: + return _PTHREAD_KEYS_MAX, nil + case SC_THREAD_PRIO_INHERIT: + return _POSIX_THREAD_PRIO_INHERIT, nil + case SC_THREAD_PRIO_PROTECT: + return _POSIX_THREAD_PRIO_PROTECT, nil + case SC_THREAD_STACK_MIN: + return _PTHREAD_STACK_MIN, nil + case SC_THREAD_THREADS_MAX: + return -1, nil + case SC_TIMER_MAX: + return -1, nil + case SC_TTY_NAME_MAX: + // should be _PATH_DEV instead of "/" + return pathconf("/", _PC_NAME_MAX), nil + case SC_TZNAME_MAX: + return pathconf(_PATH_ZONEINFO, _PC_NAME_MAX), nil + + case SC_IPV6: + if _POSIX_IPV6 == 0 { + fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, 0) + if err == nil && fd >= 0 { + unix.Close(fd) + return int64(200112), nil + } + return 0, nil + } + return _POSIX_IPV6, nil + case SC_MESSAGE_PASSING: + if _POSIX_MESSAGE_PASSING == 0 { + return yesno(sysctl32("p1003_1b.message_passing")), nil + } + return _POSIX_MESSAGE_PASSING, nil + case SC_PRIORITIZED_IO: + if _POSIX_PRIORITIZED_IO == 0 { + return yesno(sysctl32("p1003_1b.prioritized_io")), nil + } + return _POSIX_PRIORITIZED_IO, nil + case SC_PRIORITY_SCHEDULING: + if _POSIX_PRIORITY_SCHEDULING == 0 { + return yesno(sysctl32("p1003_1b.priority_scheduling")), nil + } + return _POSIX_PRIORITY_SCHEDULING, nil + case SC_REALTIME_SIGNALS: + if _POSIX_REALTIME_SIGNALS == 0 { + return yesno(sysctl32("p1003_1b.realtime_signals")), nil + } + return _POSIX_REALTIME_SIGNALS, nil + case SC_SAVED_IDS: + return yesno(sysctl32("kern.saved_ids")), nil + case SC_SEMAPHORES: + if _POSIX_SEMAPHORES == 0 { + return yesno(sysctl32("p1003_1b.semaphores")), nil + } + return _POSIX_SEMAPHORES, nil + case SC_SPAWN: + uname.Once.Do(func() { + var u unix.Utsname + err := unix.Uname(&u) + if err != nil { + return + } + rel := unix.ByteSliceToString(u.Release[:]) + ver := strings.Split(rel, ".") + maj, _ := strconv.Atoi(ver[0]) + uname.macOSMajor = maj + }) + if uname.macOSMajor < 22 { + return -1, nil + } + // macOS 13 (Ventura) and later + return 200112, nil + case SC_SPIN_LOCKS: + return _POSIX_SPIN_LOCKS, nil + case SC_SPORADIC_SERVER: + return 
_POSIX_SPORADIC_SERVER, nil + case SC_SS_REPL_MAX: + return _POSIX_SS_REPL_MAX, nil + case SC_SYNCHRONIZED_IO: + if _POSIX_SYNCHRONIZED_IO == 0 { + return yesno(sysctl32("p1003_1b.synchronized_io")), nil + } + return _POSIX_SYNCHRONIZED_IO, nil + case SC_THREAD_ATTR_STACKADDR: + return _POSIX_THREAD_ATTR_STACKADDR, nil + case SC_THREAD_ATTR_STACKSIZE: + return _POSIX_THREAD_ATTR_STACKSIZE, nil + case SC_THREAD_CPUTIME: + return _POSIX_THREAD_CPUTIME, nil + case SC_THREAD_PRIORITY_SCHEDULING: + return _POSIX_THREAD_PRIORITY_SCHEDULING, nil + case SC_THREAD_PROCESS_SHARED: + return _POSIX_THREAD_PROCESS_SHARED, nil + case SC_THREAD_SAFE_FUNCTIONS: + return _POSIX_THREAD_SAFE_FUNCTIONS, nil + case SC_THREAD_SPORADIC_SERVER: + return _POSIX_THREAD_SPORADIC_SERVER, nil + case SC_TIMERS: + if _POSIX_TIMERS == 0 { + return yesno(sysctl32("p1003_1b.timers")), nil + } + return _POSIX_TIMERS, nil + case SC_TRACE: + return _POSIX_TRACE, nil + case SC_TRACE_EVENT_FILTER: + return _POSIX_TRACE_EVENT_FILTER, nil + case SC_TRACE_EVENT_NAME_MAX: + return _POSIX_TRACE_EVENT_NAME_MAX, nil + case SC_TRACE_INHERIT: + return _POSIX_TRACE_INHERIT, nil + case SC_TRACE_LOG: + return _POSIX_TRACE_LOG, nil + case SC_TRACE_NAME_MAX: + return _POSIX_TRACE_NAME_MAX, nil + case SC_TRACE_SYS_MAX: + return _POSIX_TRACE_SYS_MAX, nil + case SC_TRACE_USER_EVENT_MAX: + return _POSIX_TRACE_USER_EVENT_MAX, nil + case SC_TYPED_MEMORY_OBJECTS: + return _POSIX_TYPED_MEMORY_OBJECTS, nil + case SC_VERSION: + // TODO(tk): darwin libc uses sysctl(CTL_KERN, KERN_POSIX1) + return _POSIX_VERSION, nil + + case SC_V6_ILP32_OFF32: + if _V6_ILP32_OFF32 == 0 { + if unix.SizeofInt*_CHAR_BIT == 32 && + unix.SizeofInt == unix.SizeofLong && + unix.SizeofLong == unix.SizeofPtr && + unix.SizeofPtr == sizeofOffT { + return 1, nil + } + return -1, nil + } + return _V6_ILP32_OFF32, nil + case SC_V6_ILP32_OFFBIG: + if _V6_ILP32_OFFBIG == 0 { + if unix.SizeofInt*_CHAR_BIT == 32 && + unix.SizeofInt == unix.SizeofLong && + unix.SizeofLong == unix.SizeofPtr && + sizeofOffT*_CHAR_BIT >= 64 { + return 1, nil + } + return -1, nil + } + return _V6_ILP32_OFFBIG, nil + case SC_V6_LP64_OFF64: + if _V6_LP64_OFF64 == 0 { + if unix.SizeofInt*_CHAR_BIT == 32 && + unix.SizeofLong*_CHAR_BIT == 64 && + unix.SizeofLong == unix.SizeofPtr && + unix.SizeofPtr == sizeofOffT { + return 1, nil + } + return -1, nil + } + return _V6_LP64_OFF64, nil + case SC_V6_LPBIG_OFFBIG: + if _V6_LPBIG_OFFBIG == 0 { + if unix.SizeofInt*_CHAR_BIT >= 32 && + unix.SizeofLong*_CHAR_BIT >= 64 && + unix.SizeofPtr*_CHAR_BIT >= 64 && + sizeofOffT*_CHAR_BIT >= 64 { + return 1, nil + } + return -1, nil + } + return _V6_LPBIG_OFFBIG, nil + + case SC_2_CHAR_TERM: + return _POSIX2_CHAR_TERM, nil + case SC_2_PBS, + SC_2_PBS_ACCOUNTING, + SC_2_PBS_CHECKPOINT, + SC_2_PBS_LOCATE, + SC_2_PBS_MESSAGE, + SC_2_PBS_TRACK: + return _POSIX2_PBS, nil + case SC_2_UPE: + return _POSIX2_UPE, nil + + case SC_XOPEN_CRYPT: + return _XOPEN_CRYPT, nil + case SC_XOPEN_ENH_I18N: + return _XOPEN_ENH_I18N, nil + case SC_XOPEN_REALTIME: + return _XOPEN_REALTIME, nil + case SC_XOPEN_REALTIME_THREADS: + return _XOPEN_REALTIME_THREADS, nil + case SC_XOPEN_SHM: + return _XOPEN_SHM, nil + case SC_XOPEN_STREAMS: + return -1, nil + case SC_XOPEN_UNIX: + return _XOPEN_UNIX, nil + case SC_XOPEN_VERSION: + return _XOPEN_VERSION, nil + case SC_XOPEN_XCU_VERSION: + return _XOPEN_XCU_VERSION, nil + + case SC_PHYS_PAGES: + return sysctl64("hw.memsize") / int64(unix.Getpagesize()), nil + case SC_NPROCESSORS_CONF: + fallthrough + case 
SC_NPROCESSORS_ONLN: + return sysctl32("hw.ncpu"), nil + } + + return sysconfGeneric(name) +} diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_dragonfly.go b/vendor/github.com/tklauser/go-sysconf/sysconf_dragonfly.go new file mode 100644 index 0000000000000..c2ed8d12b410c --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_dragonfly.go @@ -0,0 +1,220 @@ +// Copyright 2018 Tobias Klauser. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sysconf + +import "golang.org/x/sys/unix" + +const ( + _HOST_NAME_MAX = _MAXHOSTNAMELEN - 1 + _LOGIN_NAME_MAX = _MAXLOGNAME + _SYMLOOP_MAX = _MAXSYMLINKS +) + +// sysconf implements sysconf(3) as in the FreeBSD 12 libc. +func sysconf(name int) (int64, error) { + switch name { + case SC_AIO_LISTIO_MAX: + return sysctl32("p1003_1b.aio_listio_max"), nil + case SC_AIO_MAX: + return sysctl32("p1003_1b.aio_max"), nil + case SC_AIO_PRIO_DELTA_MAX: + return sysctl32("p1003_1b.aio_prio_delta_max"), nil + case SC_ARG_MAX: + return sysctl32("kern.argmax"), nil + case SC_ATEXIT_MAX: + return _ATEXIT_SIZE, nil + case SC_CHILD_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NPROC, &rlim); err == nil { + if rlim.Cur != unix.RLIM_INFINITY { + return rlim.Cur, nil + } + } + return -1, nil + case SC_CLK_TCK: + return _CLK_TCK, nil + case SC_DELAYTIMER_MAX: + return yesno(sysctl32("p1003_1b.delaytimer_max")), nil + case SC_GETGR_R_SIZE_MAX, SC_GETPW_R_SIZE_MAX: + return -1, nil + case SC_IOV_MAX: + return sysctl32("kern.iov_max"), nil + case SC_MQ_OPEN_MAX: + return sysctl32("kern.mqueue.mq_open_max"), nil + case SC_MQ_PRIO_MAX: + return sysctl32("kern.mqueue.mq_prio_max"), nil + case SC_NGROUPS_MAX: + return sysctl32("kern.ngroups"), nil + case SC_OPEN_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { + if rlim.Cur != unix.RLIM_INFINITY { + return rlim.Cur, nil + } + } + return -1, nil + case SC_RTSIG_MAX: + return yesno(sysctl32("p1003_1b.rtsig_max")), nil + case SC_SEM_NSEMS_MAX: + return -1, nil + case SC_SEM_VALUE_MAX: + return -1, nil + case SC_SIGQUEUE_MAX: + return yesno(sysctl32("p1003_1b.sigqueue_max")), nil + case SC_STREAM_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { + if rlim.Cur != unix.RLIM_INFINITY { + return rlim.Cur, nil + } + } + return -1, nil + case SC_THREAD_DESTRUCTOR_ITERATIONS: + return _PTHREAD_DESTRUCTOR_ITERATIONS, nil + case SC_THREAD_KEYS_MAX: + return _PTHREAD_KEYS_MAX, nil + case SC_THREAD_PRIO_INHERIT: + return _POSIX_THREAD_PRIO_INHERIT, nil + case SC_THREAD_PRIO_PROTECT: + return _POSIX_THREAD_PRIO_PROTECT, nil + case SC_THREAD_STACK_MIN: + return _PTHREAD_STACK_MIN, nil + case SC_THREAD_THREADS_MAX: + return -1, nil + case SC_TIMER_MAX: + return yesno(sysctl32("p1003_1b.timer_max")), nil + case SC_TTY_NAME_MAX: + return pathconf(_PATH_DEV, _PC_NAME_MAX), nil + case SC_TZNAME_MAX: + return pathconf(_PATH_ZONEINFO, _PC_NAME_MAX), nil + + case SC_ASYNCHRONOUS_IO: + if _POSIX_ASYNCHRONOUS_IO == 0 { + return sysctl64("p1003_1b.asynchronous_io"), nil + } + return _POSIX_ASYNCHRONOUS_IO, nil + case SC_IPV6: + if _POSIX_IPV6 == 0 { + fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, 0) + if err == nil && fd >= 0 { + unix.Close(fd) + return int64(200112), nil + } + return 0, nil + } + return _POSIX_IPV6, nil + case SC_MESSAGE_PASSING: + if _POSIX_MESSAGE_PASSING == 0 { + return 
yesno(sysctl32("p1003_1b.message_passing")), nil + } + return _POSIX_MESSAGE_PASSING, nil + case SC_PRIORITIZED_IO: + if _POSIX_PRIORITIZED_IO == 0 { + return yesno(sysctl32("p1003_1b.prioritized_io")), nil + } + return _POSIX_PRIORITIZED_IO, nil + case SC_PRIORITY_SCHEDULING: + if _POSIX_PRIORITY_SCHEDULING == 0 { + return yesno(sysctl32("p1003_1b.priority_scheduling")), nil + } + return _POSIX_PRIORITY_SCHEDULING, nil + case SC_REALTIME_SIGNALS: + if _POSIX_REALTIME_SIGNALS == 0 { + return yesno(sysctl32("p1003_1b.realtime_signals")), nil + } + return _POSIX_REALTIME_SIGNALS, nil + case SC_SAVED_IDS: + return yesno(sysctl32("kern.saved_ids")), nil + case SC_SEMAPHORES: + if _POSIX_SEMAPHORES == 0 { + return yesno(sysctl32("p1003_1b.semaphores")), nil + } + return _POSIX_SEMAPHORES, nil + case SC_SPAWN: + return _POSIX_SPAWN, nil + case SC_SPIN_LOCKS: + return _POSIX_SPIN_LOCKS, nil + case SC_SPORADIC_SERVER: + return _POSIX_SPORADIC_SERVER, nil + case SC_SYNCHRONIZED_IO: + if _POSIX_SYNCHRONIZED_IO == 0 { + return yesno(sysctl32("p1003_1b.synchronized_io")), nil + } + return _POSIX_SYNCHRONIZED_IO, nil + case SC_THREAD_ATTR_STACKADDR: + return _POSIX_THREAD_ATTR_STACKADDR, nil + case SC_THREAD_ATTR_STACKSIZE: + return _POSIX_THREAD_ATTR_STACKSIZE, nil + case SC_THREAD_CPUTIME: + return _POSIX_THREAD_CPUTIME, nil + case SC_THREAD_PRIORITY_SCHEDULING: + return _POSIX_THREAD_PRIORITY_SCHEDULING, nil + case SC_THREAD_PROCESS_SHARED: + return _POSIX_THREAD_PROCESS_SHARED, nil + case SC_THREAD_SAFE_FUNCTIONS: + return _POSIX_THREAD_SAFE_FUNCTIONS, nil + case SC_THREAD_SPORADIC_SERVER: + return _POSIX_THREAD_SPORADIC_SERVER, nil + case SC_TIMERS: + if _POSIX_TIMERS == 0 { + return yesno(sysctl32("p1003_1b.timers")), nil + } + return _POSIX_TIMERS, nil + case SC_TRACE: + return _POSIX_TRACE, nil + case SC_TYPED_MEMORY_OBJECTS: + return _POSIX_TYPED_MEMORY_OBJECTS, nil + case SC_VERSION: + // TODO(tk): FreeBSD libc uses sysctl(CTL_KERN, KERN_POSIX1) + return _POSIX_VERSION, nil + + /* TODO(tk): these need GOARCH-dependent integer size checks + case SC_V6_ILP32_OFF32: + return _V6_ILP32_OFF32, nil + case SC_V6_ILP32_OFFBIG: + return _V6_ILP32_OFFBIG, nil + case SC_V6_LP64_OFF64: + return _V6_LP64_OFF64, nil + case SC_V6_LPBIG_OFFBIG: + return _V6_LPBIG_OFFBIG, nil + */ + + case SC_2_CHAR_TERM: + return _POSIX2_CHAR_TERM, nil + case SC_2_PBS, + SC_2_PBS_ACCOUNTING, + SC_2_PBS_CHECKPOINT, + SC_2_PBS_LOCATE, + SC_2_PBS_MESSAGE, + SC_2_PBS_TRACK: + return _POSIX2_PBS, nil + case SC_2_UPE: + return _POSIX2_UPE, nil + + case SC_XOPEN_CRYPT: + return _XOPEN_CRYPT, nil + case SC_XOPEN_ENH_I18N: + return _XOPEN_ENH_I18N, nil + case SC_XOPEN_REALTIME: + return _XOPEN_REALTIME, nil + case SC_XOPEN_REALTIME_THREADS: + return _XOPEN_REALTIME_THREADS, nil + case SC_XOPEN_SHM: + return _XOPEN_SHM, nil + case SC_XOPEN_STREAMS: + return -1, nil + case SC_XOPEN_UNIX: + return _XOPEN_UNIX, nil + + case SC_PHYS_PAGES: + return sysctl64("hw.availpages"), nil + case SC_NPROCESSORS_CONF: + fallthrough + case SC_NPROCESSORS_ONLN: + return sysctl32("hw.ncpu"), nil + } + + return sysconfGeneric(name) +} diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_freebsd.go b/vendor/github.com/tklauser/go-sysconf/sysconf_freebsd.go new file mode 100644 index 0000000000000..b7939888aee92 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_freebsd.go @@ -0,0 +1,226 @@ +// Copyright 2018 Tobias Klauser. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sysconf + +import "golang.org/x/sys/unix" + +const ( + _HOST_NAME_MAX = _MAXHOSTNAMELEN - 1 + _LOGIN_NAME_MAX = _MAXLOGNAME + _SYMLOOP_MAX = _MAXSYMLINKS +) + +// sysconf implements sysconf(3) as in the FreeBSD 12 libc. +func sysconf(name int) (int64, error) { + switch name { + case SC_AIO_LISTIO_MAX: + return sysctl32("p1003_1b.aio_listio_max"), nil + case SC_AIO_MAX: + return sysctl32("p1003_1b.aio_max"), nil + case SC_AIO_PRIO_DELTA_MAX: + return sysctl32("p1003_1b.aio_prio_delta_max"), nil + case SC_ARG_MAX: + return sysctl32("kern.argmax"), nil + case SC_ATEXIT_MAX: + return _ATEXIT_SIZE, nil + case SC_CHILD_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NPROC, &rlim); err == nil { + if rlim.Cur != unix.RLIM_INFINITY { + return rlim.Cur, nil + } + } + return -1, nil + case SC_CLK_TCK: + return _CLK_TCK, nil + case SC_DELAYTIMER_MAX: + return sysctl32("p1003_1b.delaytimer_max"), nil + case SC_GETGR_R_SIZE_MAX, SC_GETPW_R_SIZE_MAX: + return -1, nil + case SC_IOV_MAX: + return sysctl32("kern.iov_max"), nil + case SC_MQ_OPEN_MAX: + return yesno(sysctl32("p1003_1b.mq_open_max")), nil + case SC_MQ_PRIO_MAX: + return _MQ_PRIO_MAX, nil + case SC_NGROUPS_MAX: + return sysctl32("kern.ngroups"), nil + case SC_OPEN_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { + if rlim.Cur != unix.RLIM_INFINITY { + return rlim.Cur, nil + } + } + return -1, nil + case SC_RTSIG_MAX: + return sysctl32("p1003_1b.rtsig_max"), nil + case SC_SEM_NSEMS_MAX: + return -1, nil + case SC_SEM_VALUE_MAX: + return _SEM_VALUE_MAX, nil + case SC_SIGQUEUE_MAX: + return sysctl32("p1003_1b.sigqueue_max"), nil + case SC_STREAM_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { + return -1, nil + } + if rlim.Cur == unix.RLIM_INFINITY { + return -1, nil + } + if rlim.Cur > _LONG_MAX { + return -1, unix.EOVERFLOW + } + if rlim.Cur > _SHRT_MAX { + return _SHRT_MAX, nil + } + return rlim.Cur, nil + case SC_THREAD_DESTRUCTOR_ITERATIONS: + return _PTHREAD_DESTRUCTOR_ITERATIONS, nil + case SC_THREAD_KEYS_MAX: + return _PTHREAD_KEYS_MAX, nil + case SC_THREAD_PRIO_INHERIT: + return _POSIX_THREAD_PRIO_INHERIT, nil + case SC_THREAD_PRIO_PROTECT: + return _POSIX_THREAD_PRIO_PROTECT, nil + case SC_THREAD_STACK_MIN: + return _PTHREAD_STACK_MIN, nil + case SC_THREAD_THREADS_MAX: + return -1, nil + case SC_TIMER_MAX: + return yesno(sysctl32("p1003_1b.timer_max")), nil + case SC_TTY_NAME_MAX: + return pathconf(_PATH_DEV, _PC_NAME_MAX), nil + case SC_TZNAME_MAX: + return pathconf(_PATH_ZONEINFO, _PC_NAME_MAX), nil + + case SC_IPV6: + if _POSIX_IPV6 == 0 { + fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, 0) + if err == nil && fd >= 0 { + unix.Close(fd) + return int64(200112), nil + } + return 0, nil + } + return _POSIX_IPV6, nil + case SC_MESSAGE_PASSING: + if _POSIX_MESSAGE_PASSING == 0 { + return yesno(sysctl32("p1003_1b.message_passing")), nil + } + return _POSIX_MESSAGE_PASSING, nil + case SC_PRIORITIZED_IO: + if _POSIX_PRIORITIZED_IO == 0 { + return yesno(sysctl32("p1003_1b.prioritized_io")), nil + } + return _POSIX_PRIORITIZED_IO, nil + case SC_PRIORITY_SCHEDULING: + if _POSIX_PRIORITY_SCHEDULING == 0 { + return yesno(sysctl32("p1003_1b.priority_scheduling")), nil + } + return _POSIX_PRIORITY_SCHEDULING, nil + case SC_REALTIME_SIGNALS: + if _POSIX_REALTIME_SIGNALS == 0 { + return 
yesno(sysctl32("p1003_1b.realtime_signals")), nil + } + return _POSIX_REALTIME_SIGNALS, nil + case SC_SAVED_IDS: + return yesno(sysctl32("kern.saved_ids")), nil + case SC_SEMAPHORES: + if _POSIX_SEMAPHORES == 0 { + return yesno(sysctl32("p1003_1b.semaphores")), nil + } + return _POSIX_SEMAPHORES, nil + case SC_SPAWN: + return _POSIX_SPAWN, nil + case SC_SPIN_LOCKS: + return _POSIX_SPIN_LOCKS, nil + case SC_SPORADIC_SERVER: + return _POSIX_SPORADIC_SERVER, nil + case SC_SYNCHRONIZED_IO: + if _POSIX_SYNCHRONIZED_IO == 0 { + return yesno(sysctl32("p1003_1b.synchronized_io")), nil + } + return _POSIX_SYNCHRONIZED_IO, nil + case SC_THREAD_ATTR_STACKADDR: + return _POSIX_THREAD_ATTR_STACKADDR, nil + case SC_THREAD_ATTR_STACKSIZE: + return _POSIX_THREAD_ATTR_STACKSIZE, nil + case SC_THREAD_CPUTIME: + return _POSIX_THREAD_CPUTIME, nil + case SC_THREAD_PRIORITY_SCHEDULING: + return _POSIX_THREAD_PRIORITY_SCHEDULING, nil + case SC_THREAD_PROCESS_SHARED: + return _POSIX_THREAD_PROCESS_SHARED, nil + case SC_THREAD_SAFE_FUNCTIONS: + return _POSIX_THREAD_SAFE_FUNCTIONS, nil + case SC_TIMERS: + if _POSIX_TIMERS == 0 { + return yesno(sysctl32("p1003_1b.timers")), nil + } + return _POSIX_TIMERS, nil + case SC_TRACE: + return _POSIX_TRACE, nil + case SC_TYPED_MEMORY_OBJECTS: + return _POSIX_TYPED_MEMORY_OBJECTS, nil + case SC_VERSION: + // TODO(tk): FreeBSD libc uses sysctl(CTL_KERN, KERN_POSIX1) + return _POSIX_VERSION, nil + + /* TODO(tk): these need GOARCH-dependent integer size checks + case SC_V6_ILP32_OFF32: + return _V6_ILP32_OFF32, nil + case SC_V6_ILP32_OFFBIG: + return _V6_ILP32_OFFBIG, nil + case SC_V6_LP64_OFF64: + return _V6_LP64_OFF64, nil + case SC_V6_LPBIG_OFFBIG: + return _V6_LPBIG_OFFBIG, nil + */ + + case SC_2_CHAR_TERM: + return _POSIX2_CHAR_TERM, nil + case SC_2_PBS, + SC_2_PBS_ACCOUNTING, + SC_2_PBS_CHECKPOINT, + SC_2_PBS_LOCATE, + SC_2_PBS_MESSAGE, + SC_2_PBS_TRACK: + return _POSIX2_PBS, nil + case SC_2_UPE: + return _POSIX2_UPE, nil + + case SC_XOPEN_CRYPT: + return _XOPEN_CRYPT, nil + case SC_XOPEN_ENH_I18N: + return _XOPEN_ENH_I18N, nil + case SC_XOPEN_REALTIME: + return _XOPEN_REALTIME, nil + case SC_XOPEN_REALTIME_THREADS: + return _XOPEN_REALTIME_THREADS, nil + case SC_XOPEN_SHM: + return _XOPEN_SHM, nil + case SC_XOPEN_STREAMS: + return -1, nil + case SC_XOPEN_UNIX: + return _XOPEN_UNIX, nil + + case SC_PHYS_PAGES: + if val, err := unix.SysctlUint64("hw.availpages"); err == nil { + return int64(val), nil + } + return -1, nil + case SC_NPROCESSORS_CONF: + fallthrough + case SC_NPROCESSORS_ONLN: + if val, err := unix.SysctlUint32("hw.ncpu"); err == nil { + return int64(val), nil + } + return -1, nil + } + + return sysconfGeneric(name) +} diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_generic.go b/vendor/github.com/tklauser/go-sysconf/sysconf_generic.go new file mode 100644 index 0000000000000..248bdc99cda5a --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_generic.go @@ -0,0 +1,46 @@ +// Copyright 2021 Tobias Klauser. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd +// +build darwin dragonfly freebsd linux netbsd openbsd + +package sysconf + +import "os" + +func sysconfGeneric(name int) (int64, error) { + // POSIX default values + if sc, err := sysconfPOSIX(name); err == nil { + return sc, nil + } + + switch name { + case SC_BC_BASE_MAX: + return _BC_BASE_MAX, nil + case SC_BC_DIM_MAX: + return _BC_DIM_MAX, nil + case SC_BC_SCALE_MAX: + return _BC_SCALE_MAX, nil + case SC_BC_STRING_MAX: + return _BC_STRING_MAX, nil + case SC_COLL_WEIGHTS_MAX: + return _COLL_WEIGHTS_MAX, nil + case SC_EXPR_NEST_MAX: + return _EXPR_NEST_MAX, nil + case SC_HOST_NAME_MAX: + return _HOST_NAME_MAX, nil + case SC_LINE_MAX: + return _LINE_MAX, nil + case SC_LOGIN_NAME_MAX: + return _LOGIN_NAME_MAX, nil + case SC_PAGESIZE: // same as SC_PAGE_SIZE + return int64(os.Getpagesize()), nil + case SC_RE_DUP_MAX: + return _RE_DUP_MAX, nil + case SC_SYMLOOP_MAX: + return _SYMLOOP_MAX, nil + } + + return -1, errInvalid +} diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_linux.go b/vendor/github.com/tklauser/go-sysconf/sysconf_linux.go new file mode 100644 index 0000000000000..5fb49ac7b6a42 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_linux.go @@ -0,0 +1,345 @@ +// Copyright 2018 Tobias Klauser. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sysconf + +import ( + "bufio" + "io/ioutil" + "os" + "runtime" + "strconv" + "strings" + + "github.com/tklauser/numcpus" + "golang.org/x/sys/unix" +) + +const ( + // CLK_TCK is a constant on Linux for all architectures except alpha and ia64. + // See e.g. + // https://git.musl-libc.org/cgit/musl/tree/src/conf/sysconf.c#n30 + // https://github.com/containerd/cgroups/pull/12 + // https://lore.kernel.org/lkml/agtlq6$iht$1@penguin.transmeta.com/ + _SYSTEM_CLK_TCK = 100 +) + +func readProcFsInt64(path string, fallback int64) int64 { + data, err := ioutil.ReadFile(path) + if err != nil { + return fallback + } + i, err := strconv.ParseInt(string(data[:len(data)-1]), 0, 64) + if err != nil { + return fallback + } + return i +} + +// getMemPages computes mem*unit/os.Getpagesize(), but avoids overflowing int64. +func getMemPages(mem uint64, unit uint32) int64 { + pageSize := os.Getpagesize() + for unit > 1 && pageSize > 1 { + unit >>= 1 + pageSize >>= 1 + } + mem *= uint64(unit) + for pageSize > 1 { + pageSize >>= 1 + mem >>= 1 + } + return int64(mem) +} + +func getPhysPages() int64 { + var si unix.Sysinfo_t + err := unix.Sysinfo(&si) + if err != nil { + return int64(0) + } + return getMemPages(uint64(si.Totalram), si.Unit) +} + +func getAvPhysPages() int64 { + var si unix.Sysinfo_t + err := unix.Sysinfo(&si) + if err != nil { + return int64(0) + } + return getMemPages(uint64(si.Freeram), si.Unit) +} + +func getNprocsSysfs() (int64, error) { + n, err := numcpus.GetOnline() + return int64(n), err +} + +func getNprocsProcStat() (int64, error) { + f, err := os.Open("/proc/stat") + if err != nil { + return -1, err + } + defer f.Close() + + count := int64(0) + s := bufio.NewScanner(f) + for s.Scan() { + if line := strings.TrimSpace(s.Text()); strings.HasPrefix(line, "cpu") { + l := strings.SplitN(line, " ", 2) + _, err := strconv.ParseInt(l[0][3:], 10, 64) + if err == nil { + count++ + } + } else { + // The current format of /proc/stat has all the + // cpu* lines at the beginning. Assume this + // stays this way. 
+ break + } + } + return count, nil +} + +func getNprocs() int64 { + count, err := getNprocsSysfs() + if err == nil { + return count + } + + count, err = getNprocsProcStat() + if err == nil { + return count + } + + // default to the value determined at runtime startup if all else fails + return int64(runtime.NumCPU()) +} + +func getNprocsConf() int64 { + count, err := numcpus.GetConfigured() + if err == nil { + return int64(count) + } + + // TODO(tk): fall back to reading /proc/cpuinfo on legacy systems + // without sysfs? + + return getNprocs() +} + +func hasClock(clockid int32) bool { + var res unix.Timespec + if err := unix.ClockGetres(clockid, &res); err != nil { + return false + } + return true +} + +func max(a, b int64) int64 { + if a > b { + return a + } + return b +} + +func sysconf(name int) (int64, error) { + switch name { + case SC_AIO_LISTIO_MAX: + return -1, nil + case SC_AIO_MAX: + return -1, nil + case SC_AIO_PRIO_DELTA_MAX: + return _AIO_PRIO_DELTA_MAX, nil + case SC_ARG_MAX: + argMax := int64(_POSIX_ARG_MAX) + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_STACK, &rlim); err == nil { + argMax = max(argMax, int64(rlim.Cur/4)) + } + return argMax, nil + case SC_ATEXIT_MAX: + return _INT_MAX, nil + case SC_CHILD_MAX: + childMax := int64(-1) + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NPROC, &rlim); err == nil && rlim.Cur != unix.RLIM_INFINITY { + childMax = int64(rlim.Cur) + } + return childMax, nil + case SC_CLK_TCK: + return _SYSTEM_CLK_TCK, nil + case SC_DELAYTIMER_MAX: + return _DELAYTIMER_MAX, nil + case SC_GETGR_R_SIZE_MAX: + return _NSS_BUFLEN_GROUP, nil + case SC_GETPW_R_SIZE_MAX: + return _NSS_BUFLEN_PASSWD, nil + case SC_MQ_OPEN_MAX: + return -1, nil + case SC_MQ_PRIO_MAX: + return _MQ_PRIO_MAX, nil + case SC_NGROUPS_MAX: + return readProcFsInt64("/proc/sys/kernel/ngroups_max", _NGROUPS_MAX), nil + case SC_OPEN_MAX: + openMax := int64(_OPEN_MAX) + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { + openMax = int64(rlim.Cur) + } + return openMax, nil + case SC_RTSIG_MAX: + return _RTSIG_MAX, nil + case SC_SEM_NSEMS_MAX: + return -1, nil + case SC_SEM_VALUE_MAX: + return _SEM_VALUE_MAX, nil + case SC_SIGQUEUE_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_SIGPENDING, &rlim); err == nil { + return int64(rlim.Cur), nil + } + return readProcFsInt64("/proc/sys/kernel/rtsig-max", _POSIX_SIGQUEUE_MAX), nil + case SC_STREAM_MAX: + return _STREAM_MAX, nil + case SC_THREAD_DESTRUCTOR_ITERATIONS: + return _POSIX_THREAD_DESTRUCTOR_ITERATIONS, nil + case SC_THREAD_KEYS_MAX: + return _PTHREAD_KEYS_MAX, nil + case SC_THREAD_PRIO_INHERIT: + return _POSIX_THREAD_PRIO_INHERIT, nil + case SC_THREAD_PRIO_PROTECT: + return _POSIX_THREAD_PRIO_PROTECT, nil + case SC_THREAD_STACK_MIN: + return _PTHREAD_STACK_MIN, nil + case SC_THREAD_THREADS_MAX: + return -1, nil + case SC_TIMER_MAX: + return -1, nil + case SC_TTY_NAME_MAX: + return _TTY_NAME_MAX, nil + case SC_TZNAME_MAX: + return -1, nil + + case SC_CPUTIME: + if hasClock(unix.CLOCK_PROCESS_CPUTIME_ID) { + return _POSIX_VERSION, nil + } + return -1, nil + case SC_MONOTONIC_CLOCK: + if hasClock(unix.CLOCK_MONOTONIC) { + return _POSIX_VERSION, nil + } + return -1, nil + case SC_SAVED_IDS: + return _POSIX_SAVED_IDS, nil + case SC_SPAWN: + return _POSIX_SPAWN, nil + case SC_SPIN_LOCKS: + return _POSIX_SPIN_LOCKS, nil + case SC_SPORADIC_SERVER: + return _POSIX_SPORADIC_SERVER, nil + case SC_SYNCHRONIZED_IO: + return _POSIX_SYNCHRONIZED_IO, nil + case 
SC_THREAD_ATTR_STACKADDR: + return _POSIX_THREAD_ATTR_STACKADDR, nil + case SC_THREAD_ATTR_STACKSIZE: + return _POSIX_THREAD_ATTR_STACKSIZE, nil + case SC_THREAD_CPUTIME: + if hasClock(unix.CLOCK_THREAD_CPUTIME_ID) { + return _POSIX_VERSION, nil + } + return -1, nil + case SC_THREAD_PRIORITY_SCHEDULING: + return _POSIX_THREAD_PRIORITY_SCHEDULING, nil + case SC_THREAD_PROCESS_SHARED: + return _POSIX_THREAD_PROCESS_SHARED, nil + case SC_THREAD_SAFE_FUNCTIONS: + return _POSIX_THREAD_SAFE_FUNCTIONS, nil + case SC_THREAD_SPORADIC_SERVER: + return _POSIX_THREAD_SPORADIC_SERVER, nil + case SC_TRACE: + return _POSIX_TRACE, nil + case SC_TRACE_EVENT_FILTER: + return _POSIX_TRACE_EVENT_FILTER, nil + case SC_TRACE_EVENT_NAME_MAX: + return -1, nil + case SC_TRACE_INHERIT: + return _POSIX_TRACE_INHERIT, nil + case SC_TRACE_LOG: + return _POSIX_TRACE_LOG, nil + case SC_TRACE_NAME_MAX: + return -1, nil + case SC_TRACE_SYS_MAX: + return -1, nil + case SC_TRACE_USER_EVENT_MAX: + return -1, nil + case SC_TYPED_MEMORY_OBJECTS: + return _POSIX_TYPED_MEMORY_OBJECTS, nil + + case SC_V7_ILP32_OFF32: + return _POSIX_V7_ILP32_OFF32, nil + case SC_V7_ILP32_OFFBIG: + return _POSIX_V7_ILP32_OFFBIG, nil + case SC_V7_LP64_OFF64: + return _POSIX_V7_LP64_OFF64, nil + case SC_V7_LPBIG_OFFBIG: + return _POSIX_V7_LPBIG_OFFBIG, nil + + case SC_V6_ILP32_OFF32: + return _POSIX_V6_ILP32_OFF32, nil + case SC_V6_ILP32_OFFBIG: + return _POSIX_V6_ILP32_OFFBIG, nil + case SC_V6_LP64_OFF64: + return _POSIX_V6_LP64_OFF64, nil + case SC_V6_LPBIG_OFFBIG: + return _POSIX_V6_LPBIG_OFFBIG, nil + + case SC_2_C_VERSION: + return _POSIX2_C_VERSION, nil + case SC_2_CHAR_TERM: + return _POSIX2_CHAR_TERM, nil + case SC_2_PBS, + SC_2_PBS_ACCOUNTING, + SC_2_PBS_CHECKPOINT, + SC_2_PBS_LOCATE, + SC_2_PBS_MESSAGE, + SC_2_PBS_TRACK: + return -1, nil + case SC_2_UPE: + return -1, nil + + case SC_XOPEN_CRYPT: + // removed in glibc 2.28 + return -1, nil + case SC_XOPEN_ENH_I18N: + return _XOPEN_ENH_I18N, nil + case SC_XOPEN_REALTIME: + return _XOPEN_REALTIME, nil + case SC_XOPEN_REALTIME_THREADS: + return _XOPEN_REALTIME_THREADS, nil + case SC_XOPEN_SHM: + return _XOPEN_SHM, nil + case SC_XOPEN_STREAMS: + return -1, nil + case SC_XOPEN_UNIX: + return _XOPEN_UNIX, nil + case SC_XOPEN_VERSION: + return _XOPEN_VERSION, nil + case SC_XOPEN_XCU_VERSION: + return _XOPEN_XCU_VERSION, nil + + case SC_PHYS_PAGES: + return getPhysPages(), nil + case SC_AVPHYS_PAGES: + return getAvPhysPages(), nil + case SC_NPROCESSORS_CONF: + return getNprocsConf(), nil + case SC_NPROCESSORS_ONLN: + return getNprocs(), nil + case SC_UIO_MAXIOV: // same as _SC_IOV_MAX + return _UIO_MAXIOV, nil + } + + return sysconfGeneric(name) +} diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go new file mode 100644 index 0000000000000..325d4a6a83f44 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_netbsd.go @@ -0,0 +1,250 @@ +// Copyright 2018 Tobias Klauser. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sysconf + +import ( + "sync" + + "golang.org/x/sys/unix" +) + +const ( + _HOST_NAME_MAX = _MAXHOSTNAMELEN + _LOGIN_NAME_MAX = _MAXLOGNAME + 1 + _SYMLOOP_MAX = _MAXSYMLINKS + + _POSIX2_C_BIND = 1 + _POSIX2_C_DEV = -1 + _POSIX2_CHAR_TERM = -1 + _POSIX2_FORT_DEV = -1 + _POSIX2_FORT_RUN = -1 + _POSIX2_LOCALEDEF = -1 + _POSIX2_SW_DEV = -1 + _POSIX2_UPE = -1 +) + +var ( + clktck int64 + clktckOnce sync.Once +) + +func sysconfPOSIX(name int) (int64, error) { + // NetBSD does not define all _POSIX_* values used in sysconf_posix.go + // The supported ones are handled in sysconf below. + return -1, errInvalid +} + +func sysconf(name int) (int64, error) { + // NetBSD uses sysctl to get some of these values. For the user.* namespace, + // calls get handled by user_sysctl in /usr/src/lib/libc/gen/sysctl.c + // Duplicate the relevant values here. + + switch name { + + // 1003.1 + case SC_ARG_MAX: + return sysctl32("kern.argmax"), nil + case SC_CHILD_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NPROC, &rlim); err == nil { + if rlim.Cur != unix.RLIM_INFINITY { + return int64(rlim.Cur), nil + } + } + return -1, nil + case SC_CLK_TCK: + clktckOnce.Do(func() { + clktck = -1 + if ci, err := unix.SysctlClockinfo("kern.clockrate"); err == nil { + clktck = int64(ci.Hz) + } + }) + return clktck, nil + case SC_NGROUPS_MAX: + return sysctl32("kern.ngroups"), nil + case SC_JOB_CONTROL: + return sysctl32("kern.job_control"), nil + case SC_OPEN_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { + return int64(rlim.Cur), nil + } + return -1, nil + case SC_STREAM_MAX: + // sysctl("user.stream_max") + return _FOPEN_MAX, nil + case SC_TZNAME_MAX: + // sysctl("user.tzname_max") + return _NAME_MAX, nil + case SC_SAVED_IDS: + return yesno(sysctl32("kern.saved_ids")), nil + case SC_VERSION: + return sysctl32("kern.posix1version"), nil + + // 1003.1b + case SC_FSYNC: + return sysctl32("kern.fsync"), nil + case SC_SYNCHRONIZED_IO: + return sysctl32("kern.synchronized_io"), nil + case SC_MAPPED_FILES: + return sysctl32("kern.mapped_files"), nil + case SC_MEMLOCK: + return sysctl32("kern.memlock"), nil + case SC_MEMLOCK_RANGE: + return sysctl32("kern.memlock_range"), nil + case SC_MEMORY_PROTECTION: + return sysctl32("kern.memory_protection"), nil + case SC_MONOTONIC_CLOCK: + return sysctl32("kern.monotonic_clock"), nil + case SC_SEMAPHORES: + return sysctl32("kern.posix_semaphores"), nil + case SC_TIMERS: + return sysctl32("kern.posix_timers"), nil + + // 1003.1c + case SC_LOGIN_NAME_MAX: + return sysctl32("kern.login_name_max"), nil + case SC_THREADS: + return sysctl32("kern.posix_threads"), nil + + // 1003.1j + case SC_BARRIERS: + return yesno(sysctl32("kern.posix_barriers")), nil + case SC_SPIN_LOCKS: + return yesno(sysctl32("kern.posix_spin_locks")), nil + case SC_READER_WRITER_LOCKS: + return yesno(sysctl32("kern.posix_reader_writer_locks")), nil + + // 1003.2 + case SC_2_VERSION: + // sysctl user.posix2_version + return _POSIX2_VERSION, nil + case SC_2_C_BIND: + // sysctl user.posix2_c_bind + return _POSIX2_C_BIND, nil + case SC_2_C_DEV: + // sysctl user.posix2_c_dev + return _POSIX2_C_DEV, nil + case SC_2_CHAR_TERM: + // sysctl user.posix2_char_term + return _POSIX2_CHAR_TERM, nil + case SC_2_FORT_DEV: + // sysctl user.posix2_fort_dev + return _POSIX2_FORT_DEV, nil + case SC_2_FORT_RUN: + // sysctl user.posix2_fort_run + return _POSIX2_FORT_RUN, nil + case SC_2_LOCALEDEF: + // sysctl user.posix2_localedef + return _POSIX2_LOCALEDEF, nil + case 
SC_2_SW_DEV: + // sysctl user.posix2_sw_dev + return _POSIX2_SW_DEV, nil + case SC_2_UPE: + // sysctl user.posix2_upe + return _POSIX2_UPE, nil + + // XPG 4.2 + case SC_IOV_MAX: + return sysctl32("kern.iov_max"), nil + case SC_XOPEN_SHM: + return yesno(sysctl32("kern.ipc.sysvshm")), nil + + // 1003.1-2001, XSI Option Group + case SC_AIO_LISTIO_MAX: + return sysctl32("kern.aio_listio_max"), nil + case SC_AIO_MAX: + return sysctl32("kern.aio_max"), nil + case SC_ASYNCHRONOUS_IO: + return yesno(sysctl32("kern.posix_aio")), nil + case SC_MESSAGE_PASSING: + return yesno(sysctl32("kern.posix_msg")), nil + case SC_MQ_OPEN_MAX: + return sysctl32("kern.mqueue.mq_open_max"), nil + case SC_MQ_PRIO_MAX: + return sysctl32("kern.mqueue.mq_prio_max"), nil + case SC_PRIORITY_SCHEDULING: + return yesno(sysctl32("kern.posix_sched")), nil + case SC_ATEXIT_MAX: + // sysctl("user.atexit_max") + return -1, nil // TODO + + // 1003.1-2001, TSF + case SC_GETGR_R_SIZE_MAX: + return _GETGR_R_SIZE_MAX, nil + case SC_GETPW_R_SIZE_MAX: + return _GETPW_R_SIZE_MAX, nil + + // Unsorted + case SC_HOST_NAME_MAX: + return _MAXHOSTNAMELEN, nil + case SC_PASS_MAX: + return _PASSWORD_LEN, nil + case SC_REGEXP: + return _POSIX_REGEXP, nil + case SC_SHARED_MEMORY_OBJECTS: + return _POSIX_SHARED_MEMORY_OBJECTS, nil + case SC_SHELL: + return _POSIX_SHELL, nil + case SC_SPAWN: + return _POSIX_SPAWN, nil + + // Extensions + case SC_NPROCESSORS_CONF: + return sysctl32("hw.ncpu"), nil + case SC_NPROCESSORS_ONLN: + return sysctl32("hw.ncpuonline"), nil + + // Linux/Solaris + case SC_PHYS_PAGES: + return sysctl64("hw.physmem64") / int64(unix.Getpagesize()), nil + + // Native + case SC_SCHED_RT_TS: + return sysctl32("kern.sched.rtts"), nil + case SC_SCHED_PRI_MIN: + return sysctl32("kern.sched.pri_min"), nil + case SC_SCHED_PRI_MAX: + return sysctl32("kern.sched.pri_max"), nil + case SC_THREAD_DESTRUCTOR_ITERATIONS: + return _POSIX_THREAD_DESTRUCTOR_ITERATIONS, nil + case SC_THREAD_KEYS_MAX: + return _POSIX_THREAD_KEYS_MAX, nil + case SC_THREAD_STACK_MIN: + return int64(unix.Getpagesize()), nil + case SC_THREAD_THREADS_MAX: + return sysctl32("kern.maxproc"), nil + case SC_THREAD_ATTR_STACKADDR: + return _POSIX_THREAD_ATTR_STACKADDR, nil + case SC_THREAD_ATTR_STACKSIZE: + return _POSIX_THREAD_ATTR_STACKSIZE, nil + case SC_THREAD_SAFE_FUNCTIONS: + return _POSIX_THREAD_SAFE_FUNCTIONS, nil + case SC_THREAD_PRIO_PROTECT: + return _POSIX_THREAD_PRIO_PROTECT, nil + case SC_THREAD_PRIORITY_SCHEDULING, + SC_THREAD_PRIO_INHERIT, + SC_THREAD_PROCESS_SHARED: + return -1, nil + case SC_TTY_NAME_MAX: + return pathconf(_PATH_DEV, _PC_NAME_MAX), nil + case SC_TIMER_MAX: + return _POSIX_TIMER_MAX, nil + case SC_SEM_NSEMS_MAX: + return _LONG_MAX, nil + case SC_CPUTIME: + return _POSIX_CPUTIME, nil + case SC_THREAD_CPUTIME: + return _POSIX_THREAD_CPUTIME, nil + case SC_DELAYTIMER_MAX: + return _POSIX_DELAYTIMER_MAX, nil + case SC_SIGQUEUE_MAX: + return _POSIX_SIGQUEUE_MAX, nil + case SC_REALTIME_SIGNALS: + return 200112, nil + } + + return sysconfGeneric(name) +} diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_openbsd.go b/vendor/github.com/tklauser/go-sysconf/sysconf_openbsd.go new file mode 100644 index 0000000000000..c0c394abe44ac --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_openbsd.go @@ -0,0 +1,271 @@ +// Copyright 2018 Tobias Klauser. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sysconf + +import "golang.org/x/sys/unix" + +// sysconf implements sysconf(3) as in the OpenBSD 6.3 libc. +func sysconf(name int) (int64, error) { + switch name { + case SC_AIO_LISTIO_MAX, + SC_AIO_MAX, + SC_AIO_PRIO_DELTA_MAX: + return -1, nil + case SC_ARG_MAX: + return sysctl32("kern.argmax"), nil + case SC_ATEXIT_MAX: + return -1, nil + case SC_CHILD_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NPROC, &rlim); err == nil { + if rlim.Cur != unix.RLIM_INFINITY { + return int64(rlim.Cur), nil + } + } + return -1, nil + case SC_CLK_TCK: + return _CLK_TCK, nil + case SC_DELAYTIMER_MAX: + return -1, nil + case SC_GETGR_R_SIZE_MAX: + return _GR_BUF_LEN, nil + case SC_GETPW_R_SIZE_MAX: + return _PW_BUF_LEN, nil + case SC_IOV_MAX: + return _IOV_MAX, nil + case SC_LOGIN_NAME_MAX: + return _LOGIN_NAME_MAX, nil + case SC_NGROUPS_MAX: + return sysctl32("kern.ngroups"), nil + case SC_OPEN_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { + if rlim.Cur != unix.RLIM_INFINITY { + return int64(rlim.Cur), nil + } + } + return -1, nil + case SC_SEM_NSEMS_MAX: + return -1, nil + case SC_SEM_VALUE_MAX: + return _SEM_VALUE_MAX, nil + case SC_SIGQUEUE_MAX: + return -1, nil + case SC_STREAM_MAX: + var rlim unix.Rlimit + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { + if rlim.Cur != unix.RLIM_INFINITY { + if rlim.Cur > _SHRT_MAX { + return _SHRT_MAX, nil + } + return int64(rlim.Cur), nil + } + } + return -1, nil + case SC_THREAD_DESTRUCTOR_ITERATIONS: + return _PTHREAD_DESTRUCTOR_ITERATIONS, nil + case SC_THREAD_KEYS_MAX: + return _PTHREAD_KEYS_MAX, nil + case SC_THREAD_STACK_MIN: + return _PTHREAD_STACK_MIN, nil + case SC_THREAD_THREADS_MAX: + return -1, nil + case SC_TIMER_MAX: + return -1, nil + case SC_TTY_NAME_MAX: + return _TTY_NAME_MAX, nil + case SC_TZNAME_MAX: + return _NAME_MAX, nil + + case SC_BARRIERS: + return _POSIX_BARRIERS, nil + case SC_FSYNC: + return _POSIX_FSYNC, nil + case SC_IPV6: + if _POSIX_IPV6 == 0 { + fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, 0) + if err == nil && fd >= 0 { + unix.Close(fd) + return int64(200112), nil + } + return 0, nil + } + return _POSIX_IPV6, nil + case SC_JOB_CONTROL: + return _POSIX_JOB_CONTROL, nil + case SC_MAPPED_FILES: + return _POSIX_MAPPED_FILES, nil + case SC_MONOTONIC_CLOCK: + return _POSIX_MONOTONIC_CLOCK, nil + case SC_SAVED_IDS: + return _POSIX_SAVED_IDS, nil + case SC_SEMAPHORES: + return _POSIX_SEMAPHORES, nil + case SC_SPAWN: + return _POSIX_SPAWN, nil + case SC_SPIN_LOCKS: + return _POSIX_SPIN_LOCKS, nil + case SC_SPORADIC_SERVER: + return _POSIX_SPORADIC_SERVER, nil + case SC_SYNCHRONIZED_IO: + return _POSIX_SYNCHRONIZED_IO, nil + case SC_THREAD_ATTR_STACKADDR: + return _POSIX_THREAD_ATTR_STACKADDR, nil + case SC_THREAD_ATTR_STACKSIZE: + return _POSIX_THREAD_ATTR_STACKSIZE, nil + case SC_THREAD_CPUTIME: + return _POSIX_THREAD_CPUTIME, nil + case SC_THREAD_PRIO_INHERIT: + return _POSIX_THREAD_PRIO_INHERIT, nil + case SC_THREAD_PRIO_PROTECT: + return _POSIX_THREAD_PRIO_PROTECT, nil + case SC_THREAD_PRIORITY_SCHEDULING: + return _POSIX_THREAD_PRIORITY_SCHEDULING, nil + case SC_THREAD_PROCESS_SHARED: + return _POSIX_THREAD_PROCESS_SHARED, nil + case SC_THREAD_ROBUST_PRIO_INHERIT: + return _POSIX_THREAD_ROBUST_PRIO_INHERIT, nil + case SC_THREAD_ROBUST_PRIO_PROTECT: + return _POSIX_THREAD_ROBUST_PRIO_PROTECT, nil + case SC_THREAD_SAFE_FUNCTIONS: + return _POSIX_THREAD_SAFE_FUNCTIONS, nil + case SC_THREAD_SPORADIC_SERVER: + return 
_POSIX_THREAD_SPORADIC_SERVER, nil + case SC_THREADS: + return _POSIX_THREADS, nil + case SC_TIMEOUTS: + return _POSIX_TIMEOUTS, nil + case SC_TIMERS: + return _POSIX_TIMERS, nil + case SC_TRACE, + SC_TRACE_EVENT_FILTER, + SC_TRACE_EVENT_NAME_MAX, + SC_TRACE_INHERIT, + SC_TRACE_LOG: + return _POSIX_TRACE, nil + case SC_TYPED_MEMORY_OBJECTS: + return _POSIX_TYPED_MEMORY_OBJECTS, nil + + case SC_V7_ILP32_OFF32: + return _POSIX_V7_ILP32_OFF32, nil + case SC_V7_ILP32_OFFBIG: + if _POSIX_V7_ILP32_OFFBIG == 0 { + if unix.SizeofInt*_CHAR_BIT == 32 && + unix.SizeofLong*_CHAR_BIT == 32 && + unix.SizeofPtr*_CHAR_BIT == 32 && + sizeofOffT*_CHAR_BIT >= 64 { + return 1, nil + } + return -1, nil + } + return _POSIX_V7_ILP32_OFFBIG, nil + case SC_V7_LP64_OFF64: + if _POSIX_V7_LP64_OFF64 == 0 { + if unix.SizeofInt*_CHAR_BIT == 32 && + unix.SizeofLong*_CHAR_BIT == 64 && + unix.SizeofPtr*_CHAR_BIT == 64 && + sizeofOffT*_CHAR_BIT == 64 { + return 1, nil + } + return -1, nil + } + return _POSIX_V7_LP64_OFF64, nil + case SC_V7_LPBIG_OFFBIG: + if _POSIX_V7_LPBIG_OFFBIG == 0 { + if unix.SizeofInt*_CHAR_BIT >= 32 && + unix.SizeofLong*_CHAR_BIT >= 64 && + unix.SizeofPtr*_CHAR_BIT >= 64 && + sizeofOffT*_CHAR_BIT >= 64 { + return 1, nil + } + return -1, nil + } + return _POSIX_V7_LPBIG_OFFBIG, nil + + case SC_V6_ILP32_OFF32: + return _POSIX_V6_ILP32_OFF32, nil + case SC_V6_ILP32_OFFBIG: + if _POSIX_V6_ILP32_OFFBIG == 0 { + if unix.SizeofInt*_CHAR_BIT == 32 && + unix.SizeofLong*_CHAR_BIT == 32 && + unix.SizeofPtr*_CHAR_BIT == 32 && + sizeofOffT*_CHAR_BIT >= 64 { + return 1, nil + } + return -1, nil + } + return _POSIX_V6_ILP32_OFFBIG, nil + case SC_V6_LP64_OFF64: + if _POSIX_V6_LP64_OFF64 == 0 { + if unix.SizeofInt*_CHAR_BIT == 32 && + unix.SizeofLong*_CHAR_BIT == 64 && + unix.SizeofPtr*_CHAR_BIT == 64 && + sizeofOffT*_CHAR_BIT == 64 { + return 1, nil + } + return -1, nil + } + return _POSIX_V6_LP64_OFF64, nil + case SC_V6_LPBIG_OFFBIG: + if _POSIX_V6_LPBIG_OFFBIG == 0 { + if unix.SizeofInt*_CHAR_BIT >= 32 && + unix.SizeofLong*_CHAR_BIT >= 64 && + unix.SizeofPtr*_CHAR_BIT >= 64 && + sizeofOffT*_CHAR_BIT >= 64 { + return 1, nil + } + return -1, nil + } + return _POSIX_V6_LPBIG_OFFBIG, nil + + case SC_2_CHAR_TERM: + return _POSIX2_CHAR_TERM, nil + case SC_2_PBS, + SC_2_PBS_ACCOUNTING, + SC_2_PBS_CHECKPOINT, + SC_2_PBS_LOCATE, + SC_2_PBS_MESSAGE, + SC_2_PBS_TRACK: + return _POSIX2_PBS, nil + case SC_2_UPE: + return _POSIX2_UPE, nil + case SC_2_VERSION: + return _POSIX2_VERSION, nil + + case SC_XOPEN_CRYPT: + return _XOPEN_CRYPT, nil + case SC_XOPEN_ENH_I18N: + return _XOPEN_ENH_I18N, nil + case SC_XOPEN_REALTIME: + return _XOPEN_REALTIME, nil + case SC_XOPEN_REALTIME_THREADS: + return _XOPEN_REALTIME_THREADS, nil + case SC_XOPEN_SHM: + return _XOPEN_SHM, nil + case SC_XOPEN_STREAMS: + return _XOPEN_STREAMS, nil + case SC_XOPEN_UNIX: + return _XOPEN_UNIX, nil + case SC_XOPEN_UUCP: + return _XOPEN_UUCP, nil + + case SC_AVPHYS_PAGES: + if uvm, err := unix.SysctlUvmexp("vm.uvmexp"); err == nil { + return int64(uvm.Free), nil + } + return -1, nil + case SC_PHYS_PAGES: + return sysctl64("hw.physmem") / int64(unix.Getpagesize()), nil + case SC_NPROCESSORS_CONF: + return sysctl32("hw.ncpu"), nil + case SC_NPROCESSORS_ONLN: + if val, err := unix.SysctlUint32("hw.ncpuonline"); err == nil { + return int64(val), nil + } + return sysctl32("hw.ncpu"), nil + } + + return sysconfGeneric(name) +} diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_posix.go b/vendor/github.com/tklauser/go-sysconf/sysconf_posix.go new file mode 
100644 index 0000000000000..e61c0bc73e42e --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_posix.go @@ -0,0 +1,83 @@ +// Copyright 2018 Tobias Klauser. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || dragonfly || freebsd || linux || openbsd +// +build darwin dragonfly freebsd linux openbsd + +package sysconf + +func sysconfPOSIX(name int) (int64, error) { + switch name { + case SC_ADVISORY_INFO: + return _POSIX_ADVISORY_INFO, nil + case SC_ASYNCHRONOUS_IO: + return _POSIX_ASYNCHRONOUS_IO, nil + case SC_BARRIERS: + return _POSIX_BARRIERS, nil + case SC_CLOCK_SELECTION: + return _POSIX_CLOCK_SELECTION, nil + case SC_CPUTIME: + return _POSIX_CPUTIME, nil + case SC_FSYNC: + return _POSIX_FSYNC, nil + case SC_IPV6: + return _POSIX_IPV6, nil + case SC_JOB_CONTROL: + return _POSIX_JOB_CONTROL, nil + case SC_MAPPED_FILES: + return _POSIX_MAPPED_FILES, nil + case SC_MEMLOCK: + return _POSIX_MEMLOCK, nil + case SC_MEMLOCK_RANGE: + return _POSIX_MEMLOCK_RANGE, nil + case SC_MONOTONIC_CLOCK: + return _POSIX_MONOTONIC_CLOCK, nil + case SC_MEMORY_PROTECTION: + return _POSIX_MEMORY_PROTECTION, nil + case SC_MESSAGE_PASSING: + return _POSIX_MESSAGE_PASSING, nil + case SC_PRIORITIZED_IO: + return _POSIX_PRIORITIZED_IO, nil + case SC_PRIORITY_SCHEDULING: + return _POSIX_PRIORITY_SCHEDULING, nil + case SC_RAW_SOCKETS: + return _POSIX_RAW_SOCKETS, nil + case SC_READER_WRITER_LOCKS: + return _POSIX_READER_WRITER_LOCKS, nil + case SC_REALTIME_SIGNALS: + return _POSIX_REALTIME_SIGNALS, nil + case SC_REGEXP: + return _POSIX_REGEXP, nil + case SC_SEMAPHORES: + return _POSIX_SEMAPHORES, nil + case SC_SHARED_MEMORY_OBJECTS: + return _POSIX_SHARED_MEMORY_OBJECTS, nil + case SC_SHELL: + return _POSIX_SHELL, nil + case SC_THREADS: + return _POSIX_THREADS, nil + case SC_TIMEOUTS: + return _POSIX_TIMEOUTS, nil + case SC_TIMERS: + return _POSIX_TIMERS, nil + case SC_VERSION: + return _POSIX_VERSION, nil + + case SC_2_C_BIND: + return _POSIX2_C_BIND, nil + case SC_2_C_DEV: + return _POSIX2_C_DEV, nil + case SC_2_FORT_DEV: + return -1, nil + case SC_2_FORT_RUN: + return -1, nil + case SC_2_LOCALEDEF: + return _POSIX2_LOCALEDEF, nil + case SC_2_SW_DEV: + return _POSIX2_SW_DEV, nil + case SC_2_VERSION: + return _POSIX2_VERSION, nil + } + return -1, errInvalid +} diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_solaris.go b/vendor/github.com/tklauser/go-sysconf/sysconf_solaris.go new file mode 100644 index 0000000000000..443b21439dd45 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_solaris.go @@ -0,0 +1,14 @@ +// Copyright 2021 Tobias Klauser. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sysconf + +import "golang.org/x/sys/unix" + +func sysconf(name int) (int64, error) { + if name < 0 { + return -1, errInvalid + } + return unix.Sysconf(name) +} diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go b/vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go new file mode 100644 index 0000000000000..478d692005ea1 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_unsupported.go @@ -0,0 +1,17 @@ +// Copyright 2021 Tobias Klauser. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package sysconf + +import ( + "fmt" + "runtime" +) + +func sysconf(name int) (int64, error) { + return -1, fmt.Errorf("unsupported on %s", runtime.GOOS) +} diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go new file mode 100644 index 0000000000000..6fadf3db1fa2f --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go @@ -0,0 +1,254 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_defs_darwin.go + +//go:build darwin +// +build darwin + +package sysconf + +const ( + SC_AIO_LISTIO_MAX = 0x2a + SC_AIO_MAX = 0x2b + SC_AIO_PRIO_DELTA_MAX = 0x2c + SC_ARG_MAX = 0x1 + SC_ATEXIT_MAX = 0x6b + SC_BC_BASE_MAX = 0x9 + SC_BC_DIM_MAX = 0xa + SC_BC_SCALE_MAX = 0xb + SC_BC_STRING_MAX = 0xc + SC_CHILD_MAX = 0x2 + SC_CLK_TCK = 0x3 + SC_COLL_WEIGHTS_MAX = 0xd + SC_DELAYTIMER_MAX = 0x2d + SC_EXPR_NEST_MAX = 0xe + SC_GETGR_R_SIZE_MAX = 0x46 + SC_GETPW_R_SIZE_MAX = 0x47 + SC_HOST_NAME_MAX = 0x48 + SC_IOV_MAX = 0x38 + SC_LINE_MAX = 0xf + SC_LOGIN_NAME_MAX = 0x49 + SC_MQ_OPEN_MAX = 0x2e + SC_MQ_PRIO_MAX = 0x4b + SC_NGROUPS_MAX = 0x4 + SC_OPEN_MAX = 0x5 + SC_PAGE_SIZE = 0x1d + SC_PAGESIZE = 0x1d + SC_THREAD_DESTRUCTOR_ITERATIONS = 0x55 + SC_THREAD_KEYS_MAX = 0x56 + SC_THREAD_STACK_MIN = 0x5d + SC_THREAD_THREADS_MAX = 0x5e + SC_RE_DUP_MAX = 0x10 + SC_RTSIG_MAX = 0x30 + SC_SEM_NSEMS_MAX = 0x31 + SC_SEM_VALUE_MAX = 0x32 + SC_SIGQUEUE_MAX = 0x33 + SC_STREAM_MAX = 0x1a + SC_SYMLOOP_MAX = 0x78 + SC_TIMER_MAX = 0x34 + SC_TTY_NAME_MAX = 0x65 + SC_TZNAME_MAX = 0x1b + + SC_ADVISORY_INFO = 0x41 + SC_ASYNCHRONOUS_IO = 0x1c + SC_BARRIERS = 0x42 + SC_CLOCK_SELECTION = 0x43 + SC_CPUTIME = 0x44 + SC_FSYNC = 0x26 + SC_IPV6 = 0x76 + SC_JOB_CONTROL = 0x6 + SC_MAPPED_FILES = 0x2f + SC_MEMLOCK = 0x1e + SC_MEMLOCK_RANGE = 0x1f + SC_MEMORY_PROTECTION = 0x20 + SC_MESSAGE_PASSING = 0x21 + SC_MONOTONIC_CLOCK = 0x4a + SC_PRIORITIZED_IO = 0x22 + SC_PRIORITY_SCHEDULING = 0x23 + SC_RAW_SOCKETS = 0x77 + SC_READER_WRITER_LOCKS = 0x4c + SC_REALTIME_SIGNALS = 0x24 + SC_REGEXP = 0x4d + SC_SAVED_IDS = 0x7 + SC_SEMAPHORES = 0x25 + SC_SHARED_MEMORY_OBJECTS = 0x27 + SC_SHELL = 0x4e + SC_SPAWN = 0x4f + SC_SPIN_LOCKS = 0x50 + SC_SPORADIC_SERVER = 0x51 + SC_SS_REPL_MAX = 0x7e + SC_SYNCHRONIZED_IO = 0x28 + SC_THREAD_ATTR_STACKADDR = 0x52 + SC_THREAD_ATTR_STACKSIZE = 0x53 + SC_THREAD_CPUTIME = 0x54 + SC_THREAD_PRIO_INHERIT = 0x57 + SC_THREAD_PRIO_PROTECT = 0x58 + SC_THREAD_PRIORITY_SCHEDULING = 0x59 + SC_THREAD_PROCESS_SHARED = 0x5a + SC_THREAD_SAFE_FUNCTIONS = 0x5b + SC_THREAD_SPORADIC_SERVER = 0x5c + SC_THREADS = 0x60 + SC_TIMEOUTS = 0x5f + SC_TIMERS = 0x29 + SC_TRACE = 0x61 + SC_TRACE_EVENT_FILTER = 0x62 + SC_TRACE_EVENT_NAME_MAX = 0x7f + SC_TRACE_INHERIT = 0x63 + SC_TRACE_LOG = 0x64 + SC_TRACE_NAME_MAX = 0x80 + SC_TRACE_SYS_MAX = 0x81 + SC_TRACE_USER_EVENT_MAX = 0x82 + SC_TYPED_MEMORY_OBJECTS = 0x66 + SC_VERSION = 0x8 + + SC_V6_ILP32_OFF32 = 0x67 + SC_V6_ILP32_OFFBIG = 0x68 + SC_V6_LP64_OFF64 = 0x69 + SC_V6_LPBIG_OFFBIG = 0x6a + + SC_2_C_BIND = 0x12 + SC_2_C_DEV = 0x13 + SC_2_CHAR_TERM = 0x14 + SC_2_FORT_DEV = 0x15 + SC_2_FORT_RUN = 0x16 + SC_2_LOCALEDEF = 0x17 + SC_2_PBS = 0x3b + SC_2_PBS_ACCOUNTING = 0x3c + SC_2_PBS_CHECKPOINT = 0x3d + SC_2_PBS_LOCATE = 0x3e + SC_2_PBS_MESSAGE = 0x3f + SC_2_PBS_TRACK = 0x40 + SC_2_SW_DEV = 0x18 + SC_2_UPE = 0x19 + 
SC_2_VERSION = 0x11 + + SC_XOPEN_CRYPT = 0x6c + SC_XOPEN_ENH_I18N = 0x6d + SC_XOPEN_REALTIME = 0x6f + SC_XOPEN_REALTIME_THREADS = 0x70 + SC_XOPEN_SHM = 0x71 + SC_XOPEN_STREAMS = 0x72 + SC_XOPEN_UNIX = 0x73 + SC_XOPEN_VERSION = 0x74 + SC_XOPEN_XCU_VERSION = 0x79 + + SC_PHYS_PAGES = 0xc8 + SC_NPROCESSORS_CONF = 0x39 + SC_NPROCESSORS_ONLN = 0x3a +) + +const ( + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0x2 + _EXPR_NEST_MAX = 0x20 + _IOV_MAX = 0x400 + _LINE_MAX = 0x800 + _NAME_MAX = 0xff + _RE_DUP_MAX = 0xff + + _CLK_TCK = 0x64 + + _MAXHOSTNAMELEN = 0x100 + _MAXLOGNAME = 0xff + _MAXSYMLINKS = 0x20 + + _POSIX_ADVISORY_INFO = -0x1 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = -0x1 + _POSIX_BARRIERS = -0x1 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = -0x1 + _POSIX_CPUTIME = -0x1 + _POSIX_FSYNC = 0x30db0 + _POSIX_IPV6 = 0x30db0 + _POSIX_JOB_CONTROL = 0x30db0 + _POSIX_MAPPED_FILES = 0x30db0 + _POSIX_MEMLOCK = -0x1 + _POSIX_MEMLOCK_RANGE = -0x1 + _POSIX_MEMORY_PROTECTION = 0x30db0 + _POSIX_MESSAGE_PASSING = -0x1 + _POSIX_MONOTONIC_CLOCK = -0x1 + _POSIX_PRIORITIZED_IO = -0x1 + _POSIX_PRIORITY_SCHEDULING = -0x1 + _POSIX_RAW_SOCKETS = -0x1 + _POSIX_READER_WRITER_LOCKS = 0x30db0 + _POSIX_REALTIME_SIGNALS = -0x1 + _POSIX_REGEXP = 0x30db0 + _POSIX_SEM_VALUE_MAX = 0x7fff + _POSIX_SEMAPHORES = -0x1 + _POSIX_SHARED_MEMORY_OBJECTS = -0x1 + _POSIX_SHELL = 0x30db0 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPIN_LOCKS = -0x1 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SS_REPL_MAX = 0x4 + _POSIX_SYNCHRONIZED_IO = -0x1 + _POSIX_THREAD_ATTR_STACKADDR = 0x30db0 + _POSIX_THREAD_ATTR_STACKSIZE = 0x30db0 + _POSIX_THREAD_CPUTIME = -0x1 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_KEYS_MAX = 0x80 + _POSIX_THREAD_PRIO_INHERIT = -0x1 + _POSIX_THREAD_PRIO_PROTECT = -0x1 + _POSIX_THREAD_PRIORITY_SCHEDULING = -0x1 + _POSIX_THREAD_PROCESS_SHARED = 0x30db0 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x30db0 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x30db0 + _POSIX_TIMEOUTS = -0x1 + _POSIX_TIMERS = -0x1 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_EVENT_NAME_MAX = 0x1e + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TRACE_NAME_MAX = 0x8 + _POSIX_TRACE_SYS_MAX = 0x8 + _POSIX_TRACE_USER_EVENT_MAX = 0x20 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x30db0 + + _V6_ILP32_OFF32 = -0x1 + _V6_ILP32_OFFBIG = -0x1 + _V6_LP64_OFF64 = 0x1 + _V6_LPBIG_OFFBIG = 0x1 + + _POSIX2_C_BIND = 0x30db0 + _POSIX2_C_DEV = 0x30db0 + _POSIX2_CHAR_TERM = 0x30db0 + _POSIX2_LOCALEDEF = 0x30db0 + _POSIX2_PBS = -0x1 + _POSIX2_SW_DEV = 0x30db0 + _POSIX2_UPE = 0x30db0 + _POSIX2_VERSION = 0x30db0 + + _XOPEN_CRYPT = 0x1 + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = -0x1 + _XOPEN_REALTIME_THREADS = -0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x258 + _XOPEN_XCU_VERSION = 0x4 + + _PTHREAD_DESTRUCTOR_ITERATIONS = 0x4 + _PTHREAD_KEYS_MAX = 0x200 + _PTHREAD_STACK_MIN = 0x2000 +) + +const ( + _PC_NAME_MAX = 0x4 + + _PATH_ZONEINFO = "/usr/share/zoneinfo" +) + +const ( + _CHAR_BIT = 0x8 + + _INT_MAX = 0x7fffffff + _LONG_MAX = 0x7fffffffffffffff + + sizeofOffT = 0x8 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go new file mode 100644 index 0000000000000..0864cd4482723 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_dragonfly.go @@ -0,0 +1,228 @@ +// Code generated by cmd/cgo -godefs; 
DO NOT EDIT. +// cgo -godefs sysconf_defs_dragonfly.go + +//go:build dragonfly +// +build dragonfly + +package sysconf + +const ( + SC_AIO_LISTIO_MAX = 0x2a + SC_AIO_MAX = 0x2b + SC_AIO_PRIO_DELTA_MAX = 0x2c + SC_ARG_MAX = 0x1 + SC_ATEXIT_MAX = 0x6b + SC_BC_BASE_MAX = 0x9 + SC_BC_DIM_MAX = 0xa + SC_BC_SCALE_MAX = 0xb + SC_BC_STRING_MAX = 0xc + SC_CHILD_MAX = 0x2 + SC_CLK_TCK = 0x3 + SC_COLL_WEIGHTS_MAX = 0xd + SC_DELAYTIMER_MAX = 0x2d + SC_EXPR_NEST_MAX = 0xe + SC_GETGR_R_SIZE_MAX = 0x46 + SC_GETPW_R_SIZE_MAX = 0x47 + SC_HOST_NAME_MAX = 0x48 + SC_IOV_MAX = 0x38 + SC_LINE_MAX = 0xf + SC_LOGIN_NAME_MAX = 0x49 + SC_MQ_OPEN_MAX = 0x2e + SC_MQ_PRIO_MAX = 0x4b + SC_NGROUPS_MAX = 0x4 + SC_OPEN_MAX = 0x5 + SC_PAGE_SIZE = 0x2f + SC_PAGESIZE = 0x2f + SC_RE_DUP_MAX = 0x10 + SC_RTSIG_MAX = 0x30 + SC_SEM_NSEMS_MAX = 0x31 + SC_SEM_VALUE_MAX = 0x32 + SC_SIGQUEUE_MAX = 0x33 + SC_STREAM_MAX = 0x1a + SC_SYMLOOP_MAX = 0x78 + SC_THREAD_DESTRUCTOR_ITERATIONS = 0x55 + SC_THREAD_KEYS_MAX = 0x56 + SC_THREAD_STACK_MIN = 0x5d + SC_THREAD_THREADS_MAX = 0x5e + SC_TIMER_MAX = 0x34 + SC_TTY_NAME_MAX = 0x65 + SC_TZNAME_MAX = 0x1b + + SC_ADVISORY_INFO = 0x41 + SC_ASYNCHRONOUS_IO = 0x1c + SC_BARRIERS = 0x42 + SC_CLOCK_SELECTION = 0x43 + SC_CPUTIME = 0x44 + SC_FSYNC = 0x26 + SC_IPV6 = 0x76 + SC_JOB_CONTROL = 0x6 + SC_MAPPED_FILES = 0x1d + SC_MEMLOCK = 0x1e + SC_MEMLOCK_RANGE = 0x1f + SC_MEMORY_PROTECTION = 0x20 + SC_MESSAGE_PASSING = 0x21 + SC_MONOTONIC_CLOCK = 0x4a + SC_PRIORITIZED_IO = 0x22 + SC_PRIORITY_SCHEDULING = 0x23 + SC_RAW_SOCKETS = 0x77 + SC_READER_WRITER_LOCKS = 0x4c + SC_REALTIME_SIGNALS = 0x24 + SC_REGEXP = 0x4d + SC_SAVED_IDS = 0x7 + SC_SEMAPHORES = 0x25 + SC_SHARED_MEMORY_OBJECTS = 0x27 + SC_SHELL = 0x4e + SC_SPAWN = 0x4f + SC_SPIN_LOCKS = 0x50 + SC_SPORADIC_SERVER = 0x51 + SC_SYNCHRONIZED_IO = 0x28 + SC_THREAD_ATTR_STACKADDR = 0x52 + SC_THREAD_ATTR_STACKSIZE = 0x53 + SC_THREAD_CPUTIME = 0x54 + SC_THREAD_PRIO_INHERIT = 0x57 + SC_THREAD_PRIO_PROTECT = 0x58 + SC_THREAD_PRIORITY_SCHEDULING = 0x59 + SC_THREAD_PROCESS_SHARED = 0x5a + SC_THREAD_SAFE_FUNCTIONS = 0x5b + SC_THREAD_SPORADIC_SERVER = 0x5c + SC_THREADS = 0x60 + SC_TIMEOUTS = 0x5f + SC_TIMERS = 0x29 + SC_TRACE = 0x61 + SC_TRACE_EVENT_FILTER = 0x62 + SC_TRACE_INHERIT = 0x63 + SC_TRACE_LOG = 0x64 + SC_TYPED_MEMORY_OBJECTS = 0x66 + SC_VERSION = 0x8 + + SC_V6_ILP32_OFF32 = 0x67 + SC_V6_ILP32_OFFBIG = 0x68 + SC_V6_LP64_OFF64 = 0x69 + SC_V6_LPBIG_OFFBIG = 0x6a + + SC_2_C_BIND = 0x12 + SC_2_C_DEV = 0x13 + SC_2_CHAR_TERM = 0x14 + SC_2_FORT_DEV = 0x15 + SC_2_FORT_RUN = 0x16 + SC_2_LOCALEDEF = 0x17 + SC_2_PBS = 0x3b + SC_2_PBS_ACCOUNTING = 0x3c + SC_2_PBS_CHECKPOINT = 0x3d + SC_2_PBS_LOCATE = 0x3e + SC_2_PBS_MESSAGE = 0x3f + SC_2_PBS_TRACK = 0x40 + SC_2_SW_DEV = 0x18 + SC_2_UPE = 0x19 + SC_2_VERSION = 0x11 + + SC_XOPEN_CRYPT = 0x6c + SC_XOPEN_ENH_I18N = 0x6d + SC_XOPEN_REALTIME = 0x6f + SC_XOPEN_REALTIME_THREADS = 0x70 + SC_XOPEN_SHM = 0x71 + SC_XOPEN_STREAMS = 0x72 + SC_XOPEN_UNIX = 0x73 + SC_XOPEN_VERSION = 0x74 + SC_XOPEN_XCU_VERSION = 0x75 + + SC_PHYS_PAGES = 0x79 + SC_NPROCESSORS_CONF = 0x39 + SC_NPROCESSORS_ONLN = 0x3a +) + +const ( + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xa + _EXPR_NEST_MAX = 0x20 + _LINE_MAX = 0x800 + _RE_DUP_MAX = 0xff + + _CLK_TCK = 0x80 + + _MAXHOSTNAMELEN = 0x100 + _MAXLOGNAME = 0x11 + _MAXSYMLINKS = 0x20 + _ATEXIT_SIZE = 0x20 + + _POSIX_ADVISORY_INFO = -0x1 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x0 + _POSIX_BARRIERS = 0x30db0 + _POSIX_CHILD_MAX 
= 0x19 + _POSIX_CLOCK_SELECTION = -0x1 + _POSIX_CPUTIME = 0x30db0 + _POSIX_FSYNC = 0x30db0 + _POSIX_IPV6 = 0x0 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x30db0 + _POSIX_MEMLOCK = -0x1 + _POSIX_MEMLOCK_RANGE = 0x30db0 + _POSIX_MEMORY_PROTECTION = 0x30db0 + _POSIX_MESSAGE_PASSING = 0x30db0 + _POSIX_MONOTONIC_CLOCK = 0x30db0 + _POSIX_PRIORITIZED_IO = -0x1 + _POSIX_PRIORITY_SCHEDULING = 0x30db0 + _POSIX_RAW_SOCKETS = 0x30db0 + _POSIX_READER_WRITER_LOCKS = 0x30db0 + _POSIX_REALTIME_SIGNALS = 0x30db0 + _POSIX_REGEXP = 0x1 + _POSIX_SEM_VALUE_MAX = 0x7fff + _POSIX_SEMAPHORES = 0x30db0 + _POSIX_SHARED_MEMORY_OBJECTS = 0x30db0 + _POSIX_SHELL = 0x1 + _POSIX_SPAWN = 0x30db0 + _POSIX_SPIN_LOCKS = 0x30db0 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = -0x1 + _POSIX_THREAD_ATTR_STACKADDR = 0x30db0 + _POSIX_THREAD_ATTR_STACKSIZE = 0x30db0 + _POSIX_THREAD_CPUTIME = 0x30db0 + _POSIX_THREAD_PRIO_INHERIT = 0x30db0 + _POSIX_THREAD_PRIO_PROTECT = 0x30db0 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x30db0 + _POSIX_THREAD_PROCESS_SHARED = -0x1 + _POSIX_THREAD_SAFE_FUNCTIONS = -0x1 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x30db0 + _POSIX_TIMEOUTS = 0x30db0 + _POSIX_TIMERS = 0x30db0 + _POSIX_TRACE = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x30db0 + + _V6_ILP32_OFF32 = -0x1 + _V6_ILP32_OFFBIG = 0x0 + _V6_LP64_OFF64 = 0x0 + _V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_CHAR_TERM = 0x1 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_PBS = -0x1 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_UPE = 0x31069 + _POSIX2_VERSION = 0x30a2c + + _XOPEN_CRYPT = -0x1 + _XOPEN_ENH_I18N = -0x1 + _XOPEN_REALTIME = -0x1 + _XOPEN_REALTIME_THREADS = -0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = -0x1 + + _PTHREAD_DESTRUCTOR_ITERATIONS = 0x4 + _PTHREAD_KEYS_MAX = 0x100 + _PTHREAD_STACK_MIN = 0x4000 +) + +const ( + _PC_NAME_MAX = 0x4 + + _PATH_DEV = "/dev/" + _PATH_ZONEINFO = "/usr/share/zoneinfo" +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go new file mode 100644 index 0000000000000..9885411acbd74 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_freebsd.go @@ -0,0 +1,229 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_defs_freebsd.go + +//go:build freebsd +// +build freebsd + +package sysconf + +const ( + SC_AIO_LISTIO_MAX = 0x2a + SC_AIO_MAX = 0x2b + SC_AIO_PRIO_DELTA_MAX = 0x2c + SC_ARG_MAX = 0x1 + SC_ATEXIT_MAX = 0x6b + SC_BC_BASE_MAX = 0x9 + SC_BC_DIM_MAX = 0xa + SC_BC_SCALE_MAX = 0xb + SC_BC_STRING_MAX = 0xc + SC_CHILD_MAX = 0x2 + SC_CLK_TCK = 0x3 + SC_COLL_WEIGHTS_MAX = 0xd + SC_DELAYTIMER_MAX = 0x2d + SC_EXPR_NEST_MAX = 0xe + SC_GETGR_R_SIZE_MAX = 0x46 + SC_GETPW_R_SIZE_MAX = 0x47 + SC_HOST_NAME_MAX = 0x48 + SC_IOV_MAX = 0x38 + SC_LINE_MAX = 0xf + SC_LOGIN_NAME_MAX = 0x49 + SC_MQ_OPEN_MAX = 0x2e + SC_MQ_PRIO_MAX = 0x4b + SC_NGROUPS_MAX = 0x4 + SC_OPEN_MAX = 0x5 + SC_PAGE_SIZE = 0x2f + SC_PAGESIZE = 0x2f + SC_RE_DUP_MAX = 0x10 + SC_RTSIG_MAX = 0x30 + SC_SEM_NSEMS_MAX = 0x31 + SC_SEM_VALUE_MAX = 0x32 + SC_SIGQUEUE_MAX = 0x33 + SC_STREAM_MAX = 0x1a + SC_SYMLOOP_MAX = 0x78 + SC_THREAD_DESTRUCTOR_ITERATIONS = 0x55 + SC_THREAD_KEYS_MAX = 0x56 + SC_THREAD_STACK_MIN = 0x5d + SC_THREAD_THREADS_MAX = 0x5e + SC_TIMER_MAX = 0x34 + SC_TTY_NAME_MAX = 0x65 + SC_TZNAME_MAX = 0x1b + + SC_ADVISORY_INFO = 0x41 + SC_ASYNCHRONOUS_IO = 0x1c + SC_BARRIERS = 0x42 + SC_CLOCK_SELECTION = 0x43 + SC_CPUTIME = 0x44 + SC_FSYNC = 0x26 + SC_IPV6 = 0x76 + SC_JOB_CONTROL = 0x6 + SC_MAPPED_FILES = 0x1d + SC_MEMLOCK = 0x1e + SC_MEMLOCK_RANGE = 0x1f + SC_MEMORY_PROTECTION = 0x20 + SC_MESSAGE_PASSING = 0x21 + SC_MONOTONIC_CLOCK = 0x4a + SC_PRIORITIZED_IO = 0x22 + SC_PRIORITY_SCHEDULING = 0x23 + SC_RAW_SOCKETS = 0x77 + SC_READER_WRITER_LOCKS = 0x4c + SC_REALTIME_SIGNALS = 0x24 + SC_REGEXP = 0x4d + SC_SAVED_IDS = 0x7 + SC_SEMAPHORES = 0x25 + SC_SHARED_MEMORY_OBJECTS = 0x27 + SC_SHELL = 0x4e + SC_SPAWN = 0x4f + SC_SPIN_LOCKS = 0x50 + SC_SPORADIC_SERVER = 0x51 + SC_SYNCHRONIZED_IO = 0x28 + SC_THREAD_ATTR_STACKADDR = 0x52 + SC_THREAD_ATTR_STACKSIZE = 0x53 + SC_THREAD_CPUTIME = 0x54 + SC_THREAD_PRIO_INHERIT = 0x57 + SC_THREAD_PRIO_PROTECT = 0x58 + SC_THREAD_PRIORITY_SCHEDULING = 0x59 + SC_THREAD_PROCESS_SHARED = 0x5a + SC_THREAD_SAFE_FUNCTIONS = 0x5b + SC_THREAD_SPORADIC_SERVER = 0x5c + SC_THREADS = 0x60 + SC_TIMEOUTS = 0x5f + SC_TIMERS = 0x29 + SC_TRACE = 0x61 + SC_TRACE_EVENT_FILTER = 0x62 + SC_TRACE_INHERIT = 0x63 + SC_TRACE_LOG = 0x64 + SC_TYPED_MEMORY_OBJECTS = 0x66 + SC_VERSION = 0x8 + + SC_V6_ILP32_OFF32 = 0x67 + SC_V6_ILP32_OFFBIG = 0x68 + SC_V6_LP64_OFF64 = 0x69 + SC_V6_LPBIG_OFFBIG = 0x6a + + SC_2_C_BIND = 0x12 + SC_2_C_DEV = 0x13 + SC_2_CHAR_TERM = 0x14 + SC_2_FORT_DEV = 0x15 + SC_2_FORT_RUN = 0x16 + SC_2_LOCALEDEF = 0x17 + SC_2_PBS = 0x3b + SC_2_PBS_ACCOUNTING = 0x3c + SC_2_PBS_CHECKPOINT = 0x3d + SC_2_PBS_LOCATE = 0x3e + SC_2_PBS_MESSAGE = 0x3f + SC_2_PBS_TRACK = 0x40 + SC_2_SW_DEV = 0x18 + SC_2_UPE = 0x19 + SC_2_VERSION = 0x11 + + SC_XOPEN_CRYPT = 0x6c + SC_XOPEN_ENH_I18N = 0x6d + SC_XOPEN_REALTIME = 0x6f + SC_XOPEN_REALTIME_THREADS = 0x70 + SC_XOPEN_SHM = 0x71 + SC_XOPEN_STREAMS = 0x72 + SC_XOPEN_UNIX = 0x73 + SC_XOPEN_VERSION = 0x74 + SC_XOPEN_XCU_VERSION = 0x75 + + SC_PHYS_PAGES = 0x79 + SC_NPROCESSORS_CONF = 0x39 + SC_NPROCESSORS_ONLN = 0x3a +) + +const ( + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xa + _EXPR_NEST_MAX = 0x20 + _LINE_MAX = 0x800 + _MQ_PRIO_MAX = 0x40 + _RE_DUP_MAX = 0xff + _SEM_VALUE_MAX = 0x7fffffff + + _CLK_TCK = 0x80 + + _MAXHOSTNAMELEN = 0x100 + _MAXLOGNAME = 0x21 + _MAXSYMLINKS = 0x20 + _ATEXIT_SIZE = 0x20 + + _POSIX_ADVISORY_INFO = 0x30db0 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x30db0 + 
_POSIX_BARRIERS = 0x30db0 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = -0x1 + _POSIX_CPUTIME = 0x30db0 + _POSIX_FSYNC = 0x30db0 + _POSIX_IPV6 = 0x0 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x30db0 + _POSIX_MEMLOCK = -0x1 + _POSIX_MEMLOCK_RANGE = 0x30db0 + _POSIX_MEMORY_PROTECTION = 0x30db0 + _POSIX_MESSAGE_PASSING = 0x30db0 + _POSIX_MONOTONIC_CLOCK = 0x30db0 + _POSIX_PRIORITIZED_IO = -0x1 + _POSIX_PRIORITY_SCHEDULING = 0x0 + _POSIX_RAW_SOCKETS = 0x30db0 + _POSIX_READER_WRITER_LOCKS = 0x30db0 + _POSIX_REALTIME_SIGNALS = 0x30db0 + _POSIX_REGEXP = 0x1 + _POSIX_SEM_VALUE_MAX = 0x7fff + _POSIX_SEMAPHORES = 0x30db0 + _POSIX_SHARED_MEMORY_OBJECTS = 0x30db0 + _POSIX_SHELL = 0x1 + _POSIX_SPAWN = 0x30db0 + _POSIX_SPIN_LOCKS = 0x30db0 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = -0x1 + _POSIX_THREAD_ATTR_STACKADDR = 0x30db0 + _POSIX_THREAD_ATTR_STACKSIZE = 0x30db0 + _POSIX_THREAD_CPUTIME = 0x30db0 + _POSIX_THREAD_PRIO_INHERIT = 0x30db0 + _POSIX_THREAD_PRIO_PROTECT = 0x30db0 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x30db0 + _POSIX_THREAD_PROCESS_SHARED = 0x30db0 + _POSIX_THREAD_SAFE_FUNCTIONS = -0x1 + _POSIX_THREADS = 0x30db0 + _POSIX_TIMEOUTS = 0x30db0 + _POSIX_TIMERS = 0x30db0 + _POSIX_TRACE = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x30db0 + + _V6_ILP32_OFF32 = -0x1 + _V6_ILP32_OFFBIG = 0x0 + _V6_LP64_OFF64 = 0x0 + _V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x30db0 + _POSIX2_C_DEV = -0x1 + _POSIX2_CHAR_TERM = 0x1 + _POSIX2_LOCALEDEF = -0x1 + _POSIX2_PBS = -0x1 + _POSIX2_SW_DEV = -0x1 + _POSIX2_UPE = 0x30db0 + _POSIX2_VERSION = 0x30a2c + + _XOPEN_CRYPT = -0x1 + _XOPEN_ENH_I18N = -0x1 + _XOPEN_REALTIME = -0x1 + _XOPEN_REALTIME_THREADS = -0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = -0x1 + + _PTHREAD_DESTRUCTOR_ITERATIONS = 0x4 + _PTHREAD_KEYS_MAX = 0x100 + _PTHREAD_STACK_MIN = 0x800 +) + +const ( + _PC_NAME_MAX = 0x4 + + _PATH_DEV = "/dev/" + _PATH_ZONEINFO = "/usr/share/zoneinfo" +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go new file mode 100644 index 0000000000000..8545a342b9088 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_linux.go @@ -0,0 +1,147 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_defs_linux.go + +//go:build linux +// +build linux + +package sysconf + +const ( + SC_AIO_LISTIO_MAX = 0x17 + SC_AIO_MAX = 0x18 + SC_AIO_PRIO_DELTA_MAX = 0x19 + SC_ARG_MAX = 0x0 + SC_ATEXIT_MAX = 0x57 + SC_BC_BASE_MAX = 0x24 + SC_BC_DIM_MAX = 0x25 + SC_BC_SCALE_MAX = 0x26 + SC_BC_STRING_MAX = 0x27 + SC_CHILD_MAX = 0x1 + SC_CLK_TCK = 0x2 + SC_COLL_WEIGHTS_MAX = 0x28 + SC_DELAYTIMER_MAX = 0x1a + SC_EXPR_NEST_MAX = 0x2a + SC_GETGR_R_SIZE_MAX = 0x45 + SC_GETPW_R_SIZE_MAX = 0x46 + SC_HOST_NAME_MAX = 0xb4 + SC_IOV_MAX = 0x3c + SC_LINE_MAX = 0x2b + SC_LOGIN_NAME_MAX = 0x47 + SC_MQ_OPEN_MAX = 0x1b + SC_MQ_PRIO_MAX = 0x1c + SC_NGROUPS_MAX = 0x3 + SC_OPEN_MAX = 0x4 + SC_PAGE_SIZE = 0x1e + SC_PAGESIZE = 0x1e + SC_THREAD_DESTRUCTOR_ITERATIONS = 0x49 + SC_THREAD_KEYS_MAX = 0x4a + SC_THREAD_STACK_MIN = 0x4b + SC_THREAD_THREADS_MAX = 0x4c + SC_RE_DUP_MAX = 0x2c + SC_RTSIG_MAX = 0x1f + SC_SEM_NSEMS_MAX = 0x20 + SC_SEM_VALUE_MAX = 0x21 + SC_SIGQUEUE_MAX = 0x22 + SC_STREAM_MAX = 0x5 + SC_SYMLOOP_MAX = 0xad + SC_TIMER_MAX = 0x23 + SC_TTY_NAME_MAX = 0x48 + SC_TZNAME_MAX = 0x6 + + SC_ADVISORY_INFO = 0x84 + SC_ASYNCHRONOUS_IO = 0xc + SC_BARRIERS = 0x85 + SC_CLOCK_SELECTION = 0x89 + SC_CPUTIME = 0x8a + SC_FSYNC = 0xf + SC_IPV6 = 0xeb + SC_JOB_CONTROL = 0x7 + SC_MAPPED_FILES = 0x10 + SC_MEMLOCK = 0x11 + SC_MEMLOCK_RANGE = 0x12 + SC_MEMORY_PROTECTION = 0x13 + SC_MESSAGE_PASSING = 0x14 + SC_MONOTONIC_CLOCK = 0x95 + SC_PRIORITIZED_IO = 0xd + SC_PRIORITY_SCHEDULING = 0xa + SC_RAW_SOCKETS = 0xec + SC_READER_WRITER_LOCKS = 0x99 + SC_REALTIME_SIGNALS = 0x9 + SC_REGEXP = 0x9b + SC_SAVED_IDS = 0x8 + SC_SEMAPHORES = 0x15 + SC_SHARED_MEMORY_OBJECTS = 0x16 + SC_SHELL = 0x9d + SC_SPAWN = 0x9f + SC_SPIN_LOCKS = 0x9a + SC_SPORADIC_SERVER = 0xa0 + SC_SS_REPL_MAX = 0xf1 + SC_SYNCHRONIZED_IO = 0xe + SC_THREAD_ATTR_STACKADDR = 0x4d + SC_THREAD_ATTR_STACKSIZE = 0x4e + SC_THREAD_CPUTIME = 0x8b + SC_THREAD_PRIO_INHERIT = 0x50 + SC_THREAD_PRIO_PROTECT = 0x51 + SC_THREAD_PRIORITY_SCHEDULING = 0x4f + SC_THREAD_PROCESS_SHARED = 0x52 + SC_THREAD_ROBUST_PRIO_INHERIT = 0xf7 + SC_THREAD_ROBUST_PRIO_PROTECT = 0xf8 + SC_THREAD_SAFE_FUNCTIONS = 0x44 + SC_THREAD_SPORADIC_SERVER = 0xa1 + SC_THREADS = 0x43 + SC_TIMEOUTS = 0xa4 + SC_TIMERS = 0xb + SC_TRACE = 0xb5 + SC_TRACE_EVENT_FILTER = 0xb6 + SC_TRACE_EVENT_NAME_MAX = 0xf2 + SC_TRACE_INHERIT = 0xb7 + SC_TRACE_LOG = 0xb8 + SC_TRACE_NAME_MAX = 0xf3 + SC_TRACE_SYS_MAX = 0xf4 + SC_TRACE_USER_EVENT_MAX = 0xf5 + SC_TYPED_MEMORY_OBJECTS = 0xa5 + SC_VERSION = 0x1d + + SC_V7_ILP32_OFF32 = 0xed + SC_V7_ILP32_OFFBIG = 0xee + SC_V7_LP64_OFF64 = 0xef + SC_V7_LPBIG_OFFBIG = 0xf0 + + SC_V6_ILP32_OFF32 = 0xb0 + SC_V6_ILP32_OFFBIG = 0xb1 + SC_V6_LP64_OFF64 = 0xb2 + SC_V6_LPBIG_OFFBIG = 0xb3 + + SC_2_C_BIND = 0x2f + SC_2_C_DEV = 0x30 + SC_2_C_VERSION = 0x60 + SC_2_CHAR_TERM = 0x5f + SC_2_FORT_DEV = 0x31 + SC_2_FORT_RUN = 0x32 + SC_2_LOCALEDEF = 0x34 + SC_2_PBS = 0xa8 + SC_2_PBS_ACCOUNTING = 0xa9 + SC_2_PBS_CHECKPOINT = 0xaf + SC_2_PBS_LOCATE = 0xaa + SC_2_PBS_MESSAGE = 0xab + SC_2_PBS_TRACK = 0xac + SC_2_SW_DEV = 0x33 + SC_2_UPE = 0x61 + SC_2_VERSION = 0x2e + + SC_XOPEN_CRYPT = 0x5c + SC_XOPEN_ENH_I18N = 0x5d + SC_XOPEN_REALTIME = 0x82 + SC_XOPEN_REALTIME_THREADS = 0x83 + SC_XOPEN_SHM = 0x5e + SC_XOPEN_STREAMS = 0xf6 + SC_XOPEN_UNIX = 0x5b + SC_XOPEN_VERSION = 0x59 + SC_XOPEN_XCU_VERSION = 0x5a + + SC_PHYS_PAGES = 0x55 + SC_AVPHYS_PAGES = 0x56 + SC_NPROCESSORS_CONF = 0x53 + SC_NPROCESSORS_ONLN = 0x54 + SC_UIO_MAXIOV = 0x3c +) diff --git 
a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go new file mode 100644 index 0000000000000..d2aaf07770491 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_netbsd.go @@ -0,0 +1,164 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_defs_netbsd.go + +//go:build netbsd +// +build netbsd + +package sysconf + +const ( + SC_ARG_MAX = 0x1 + SC_CHILD_MAX = 0x2 + SC_NGROUPS_MAX = 0x4 + SC_OPEN_MAX = 0x5 + SC_JOB_CONTROL = 0x6 + SC_SAVED_IDS = 0x7 + SC_VERSION = 0x8 + SC_BC_BASE_MAX = 0x9 + SC_BC_DIM_MAX = 0xa + SC_BC_SCALE_MAX = 0xb + SC_BC_STRING_MAX = 0xc + SC_COLL_WEIGHTS_MAX = 0xd + SC_EXPR_NEST_MAX = 0xe + SC_LINE_MAX = 0xf + SC_RE_DUP_MAX = 0x10 + SC_2_VERSION = 0x11 + SC_2_C_BIND = 0x12 + SC_2_C_DEV = 0x13 + SC_2_CHAR_TERM = 0x14 + SC_2_FORT_DEV = 0x15 + SC_2_FORT_RUN = 0x16 + SC_2_LOCALEDEF = 0x17 + SC_2_SW_DEV = 0x18 + SC_2_UPE = 0x19 + SC_STREAM_MAX = 0x1a + SC_TZNAME_MAX = 0x1b + SC_PAGESIZE = 0x1c + SC_PAGE_SIZE = 0x1c + SC_FSYNC = 0x1d + SC_XOPEN_SHM = 0x1e + SC_SYNCHRONIZED_IO = 0x1f + SC_IOV_MAX = 0x20 + SC_MAPPED_FILES = 0x21 + SC_MEMLOCK = 0x22 + SC_MEMLOCK_RANGE = 0x23 + SC_MEMORY_PROTECTION = 0x24 + SC_LOGIN_NAME_MAX = 0x25 + SC_MONOTONIC_CLOCK = 0x26 + SC_CLK_TCK = 0x27 + SC_ATEXIT_MAX = 0x28 + SC_THREADS = 0x29 + SC_SEMAPHORES = 0x2a + SC_BARRIERS = 0x2b + SC_TIMERS = 0x2c + SC_SPIN_LOCKS = 0x2d + SC_READER_WRITER_LOCKS = 0x2e + SC_GETGR_R_SIZE_MAX = 0x2f + SC_GETPW_R_SIZE_MAX = 0x30 + SC_CLOCK_SELECTION = 0x31 + SC_ASYNCHRONOUS_IO = 0x32 + SC_AIO_LISTIO_MAX = 0x33 + SC_AIO_MAX = 0x34 + SC_MESSAGE_PASSING = 0x35 + SC_MQ_OPEN_MAX = 0x36 + SC_MQ_PRIO_MAX = 0x37 + SC_PRIORITY_SCHEDULING = 0x38 + SC_THREAD_DESTRUCTOR_ITERATIONS = 0x39 + SC_THREAD_KEYS_MAX = 0x3a + SC_THREAD_STACK_MIN = 0x3b + SC_THREAD_THREADS_MAX = 0x3c + SC_THREAD_ATTR_STACKADDR = 0x3d + SC_THREAD_ATTR_STACKSIZE = 0x3e + SC_THREAD_PRIORITY_SCHEDULING = 0x3f + SC_THREAD_PRIO_INHERIT = 0x40 + SC_THREAD_PRIO_PROTECT = 0x41 + SC_THREAD_PROCESS_SHARED = 0x42 + SC_THREAD_SAFE_FUNCTIONS = 0x43 + SC_TTY_NAME_MAX = 0x44 + SC_HOST_NAME_MAX = 0x45 + SC_PASS_MAX = 0x46 + SC_REGEXP = 0x47 + SC_SHELL = 0x48 + SC_SYMLOOP_MAX = 0x49 + + SC_V6_ILP32_OFF32 = 0x4a + SC_V6_ILP32_OFFBIG = 0x4b + SC_V6_LP64_OFF64 = 0x4c + SC_V6_LPBIG_OFFBIG = 0x4d + SC_2_PBS = 0x50 + SC_2_PBS_ACCOUNTING = 0x51 + SC_2_PBS_CHECKPOINT = 0x52 + SC_2_PBS_LOCATE = 0x53 + SC_2_PBS_MESSAGE = 0x54 + SC_2_PBS_TRACK = 0x55 + + SC_SPAWN = 0x56 + SC_SHARED_MEMORY_OBJECTS = 0x57 + + SC_TIMER_MAX = 0x58 + SC_SEM_NSEMS_MAX = 0x59 + SC_CPUTIME = 0x5a + SC_THREAD_CPUTIME = 0x5b + SC_DELAYTIMER_MAX = 0x5c + SC_SIGQUEUE_MAX = 0x5d + SC_REALTIME_SIGNALS = 0x5e + + SC_PHYS_PAGES = 0x79 + + SC_NPROCESSORS_CONF = 0x3e9 + SC_NPROCESSORS_ONLN = 0x3ea + + SC_SCHED_RT_TS = 0x7d1 + SC_SCHED_PRI_MIN = 0x7d2 + SC_SCHED_PRI_MAX = 0x7d3 +) + +const ( + _MAXHOSTNAMELEN = 0x100 + _MAXLOGNAME = 0x10 + _MAXSYMLINKS = 0x20 + + _POSIX_ARG_MAX = 0x1000 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CPUTIME = 0x30db0 + _POSIX_DELAYTIMER_MAX = 0x20 + _POSIX_PRIORITY_SCHEDULING = 0x30db0 + _POSIX_REGEXP = 0x1 + _POSIX_SHARED_MEMORY_OBJECTS = 0x0 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x30db0 + _POSIX_THREAD_ATTR_STACKSIZE = 0x30db0 + _POSIX_THREAD_CPUTIME = 0x30db0 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_KEYS_MAX = 0x80 + _POSIX_THREAD_PRIO_PROTECT = 0x30db0 + _POSIX_THREAD_SAFE_FUNCTIONS = 
0x30db0 + _POSIX_TIMER_MAX = 0x20 + _POSIX_VERSION = 0x30db0 + + _POSIX2_VERSION = 0x30db0 + + _FOPEN_MAX = 0x14 + _NAME_MAX = 0x1ff + _RE_DUP_MAX = 0xff + + _BC_BASE_MAX = 0x7fffffff + _BC_DIM_MAX = 0xffff + _BC_SCALE_MAX = 0x7fffffff + _BC_STRING_MAX = 0x7fffffff + _COLL_WEIGHTS_MAX = 0x2 + _EXPR_NEST_MAX = 0x20 + _LINE_MAX = 0x800 + + _GETGR_R_SIZE_MAX = 0x400 + _GETPW_R_SIZE_MAX = 0x400 + + _PATH_DEV = "/dev/" + _PATH_ZONEINFO = "/usr/share/zoneinfo" + + _PASSWORD_LEN = 0x80 +) + +const _PC_NAME_MAX = 0x4 diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go new file mode 100644 index 0000000000000..badc66cbda879 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_openbsd.go @@ -0,0 +1,263 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_defs_openbsd.go + +//go:build openbsd +// +build openbsd + +package sysconf + +const ( + SC_AIO_LISTIO_MAX = 0x2a + SC_AIO_MAX = 0x2b + SC_AIO_PRIO_DELTA_MAX = 0x2c + SC_ARG_MAX = 0x1 + SC_ATEXIT_MAX = 0x2e + SC_BC_BASE_MAX = 0x9 + SC_BC_DIM_MAX = 0xa + SC_BC_SCALE_MAX = 0xb + SC_BC_STRING_MAX = 0xc + SC_CHILD_MAX = 0x2 + SC_CLK_TCK = 0x3 + SC_COLL_WEIGHTS_MAX = 0xd + SC_DELAYTIMER_MAX = 0x32 + SC_EXPR_NEST_MAX = 0xe + SC_GETGR_R_SIZE_MAX = 0x64 + SC_GETPW_R_SIZE_MAX = 0x65 + SC_HOST_NAME_MAX = 0x21 + SC_IOV_MAX = 0x33 + SC_LINE_MAX = 0xf + SC_LOGIN_NAME_MAX = 0x66 + SC_MQ_OPEN_MAX = 0x3a + SC_MQ_PRIO_MAX = 0x3b + SC_NGROUPS_MAX = 0x4 + SC_OPEN_MAX = 0x5 + SC_PAGE_SIZE = 0x1c + SC_PAGESIZE = 0x1c + SC_THREAD_DESTRUCTOR_ITERATIONS = 0x50 + SC_THREAD_KEYS_MAX = 0x51 + SC_THREAD_STACK_MIN = 0x59 + SC_THREAD_THREADS_MAX = 0x5a + SC_RE_DUP_MAX = 0x10 + SC_SEM_NSEMS_MAX = 0x1f + SC_SEM_VALUE_MAX = 0x20 + SC_SIGQUEUE_MAX = 0x46 + SC_STREAM_MAX = 0x1a + SC_SYMLOOP_MAX = 0x4c + SC_TIMER_MAX = 0x5d + SC_TTY_NAME_MAX = 0x6b + SC_TZNAME_MAX = 0x1b + + SC_ADVISORY_INFO = 0x29 + SC_ASYNCHRONOUS_IO = 0x2d + SC_BARRIERS = 0x2f + SC_CLOCK_SELECTION = 0x30 + SC_CPUTIME = 0x31 + SC_FSYNC = 0x1d + SC_IPV6 = 0x34 + SC_JOB_CONTROL = 0x6 + SC_MAPPED_FILES = 0x35 + SC_MEMLOCK = 0x36 + SC_MEMLOCK_RANGE = 0x37 + SC_MEMORY_PROTECTION = 0x38 + SC_MESSAGE_PASSING = 0x39 + SC_MONOTONIC_CLOCK = 0x22 + SC_PRIORITIZED_IO = 0x3c + SC_PRIORITY_SCHEDULING = 0x3d + SC_RAW_SOCKETS = 0x3e + SC_READER_WRITER_LOCKS = 0x3f + SC_REALTIME_SIGNALS = 0x40 + SC_REGEXP = 0x41 + SC_SAVED_IDS = 0x7 + SC_SEMAPHORES = 0x43 + SC_SHARED_MEMORY_OBJECTS = 0x44 + SC_SHELL = 0x45 + SC_SPAWN = 0x47 + SC_SPIN_LOCKS = 0x48 + SC_SPORADIC_SERVER = 0x49 + SC_SS_REPL_MAX = 0x4a + SC_SYNCHRONIZED_IO = 0x4b + SC_THREAD_ATTR_STACKADDR = 0x4d + SC_THREAD_ATTR_STACKSIZE = 0x4e + SC_THREAD_CPUTIME = 0x4f + SC_THREAD_PRIO_INHERIT = 0x52 + SC_THREAD_PRIO_PROTECT = 0x53 + SC_THREAD_PRIORITY_SCHEDULING = 0x54 + SC_THREAD_PROCESS_SHARED = 0x55 + SC_THREAD_ROBUST_PRIO_INHERIT = 0x56 + SC_THREAD_ROBUST_PRIO_PROTECT = 0x57 + SC_THREAD_SAFE_FUNCTIONS = 0x67 + SC_THREAD_SPORADIC_SERVER = 0x58 + SC_THREADS = 0x5b + SC_TIMEOUTS = 0x5c + SC_TIMERS = 0x5e + SC_TRACE = 0x5f + SC_TRACE_EVENT_FILTER = 0x60 + SC_TRACE_EVENT_NAME_MAX = 0x61 + SC_TRACE_INHERIT = 0x62 + SC_TRACE_LOG = 0x63 + SC_TRACE_NAME_MAX = 0x68 + SC_TRACE_SYS_MAX = 0x69 + SC_TRACE_USER_EVENT_MAX = 0x6a + SC_TYPED_MEMORY_OBJECTS = 0x6c + SC_VERSION = 0x8 + + SC_V7_ILP32_OFF32 = 0x71 + SC_V7_ILP32_OFFBIG = 0x72 + SC_V7_LP64_OFF64 = 0x73 + SC_V7_LPBIG_OFFBIG = 0x74 + + SC_V6_ILP32_OFF32 = 0x6d + SC_V6_ILP32_OFFBIG = 0x6e + SC_V6_LP64_OFF64 
= 0x6f + SC_V6_LPBIG_OFFBIG = 0x70 + + SC_2_C_BIND = 0x12 + SC_2_C_DEV = 0x13 + SC_2_CHAR_TERM = 0x14 + SC_2_FORT_DEV = 0x15 + SC_2_FORT_RUN = 0x16 + SC_2_LOCALEDEF = 0x17 + SC_2_PBS = 0x23 + SC_2_PBS_ACCOUNTING = 0x24 + SC_2_PBS_CHECKPOINT = 0x25 + SC_2_PBS_LOCATE = 0x26 + SC_2_PBS_MESSAGE = 0x27 + SC_2_PBS_TRACK = 0x28 + SC_2_SW_DEV = 0x18 + SC_2_UPE = 0x19 + SC_2_VERSION = 0x11 + + SC_XOPEN_CRYPT = 0x75 + SC_XOPEN_ENH_I18N = 0x76 + SC_XOPEN_REALTIME = 0x78 + SC_XOPEN_REALTIME_THREADS = 0x79 + SC_XOPEN_SHM = 0x1e + SC_XOPEN_STREAMS = 0x7a + SC_XOPEN_UNIX = 0x7b + SC_XOPEN_UUCP = 0x7c + SC_XOPEN_VERSION = 0x7d + + SC_AVPHYS_PAGES = 0x1f5 + SC_PHYS_PAGES = 0x1f4 + SC_NPROCESSORS_CONF = 0x1f6 + SC_NPROCESSORS_ONLN = 0x1f7 +) + +const ( + _HOST_NAME_MAX = 0xff + _IOV_MAX = 0x400 + _LOGIN_NAME_MAX = 0x20 + _PTHREAD_DESTRUCTOR_ITERATIONS = 0x4 + _PTHREAD_KEYS_MAX = 0x100 + _PTHREAD_STACK_MIN = 0x1000 + _PTHREAD_THREADS_MAX = 0xffffffffffffffff + _SEM_VALUE_MAX = 0xffffffff + _SYMLOOP_MAX = 0x20 + _TTY_NAME_MAX = 0x104 + + _GR_BUF_LEN = 0xa40 + _PW_BUF_LEN = 0x400 + + _CLK_TCK = 0x64 + + _POSIX_ADVISORY_INFO = -0x1 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = -0x1 + _POSIX_BARRIERS = 0x30db0 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = -0x1 + _POSIX_CPUTIME = 0x31069 + _POSIX_FSYNC = 0x30db0 + _POSIX_IPV6 = 0x0 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x30db0 + _POSIX_MEMLOCK = 0x30db0 + _POSIX_MEMLOCK_RANGE = 0x30db0 + _POSIX_MEMORY_PROTECTION = 0x30db0 + _POSIX_MESSAGE_PASSING = -0x1 + _POSIX_MONOTONIC_CLOCK = 0x30db0 + _POSIX_PRIORITIZED_IO = -0x1 + _POSIX_PRIORITY_SCHEDULING = -0x1 + _POSIX_RAW_SOCKETS = 0x30db0 + _POSIX_READER_WRITER_LOCKS = 0x30db0 + _POSIX_REALTIME_SIGNALS = -0x1 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x30db0 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SPAWN = 0x30db0 + _POSIX_SPIN_LOCKS = 0x30db0 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = -0x1 + _POSIX_THREAD_ATTR_STACKADDR = 0x30db0 + _POSIX_THREAD_ATTR_STACKSIZE = 0x30db0 + _POSIX_THREAD_CPUTIME = 0x31069 + _POSIX_THREAD_KEYS_MAX = 0x80 + _POSIX_THREAD_PRIO_INHERIT = -0x1 + _POSIX_THREAD_PRIO_PROTECT = -0x1 + _POSIX_THREAD_PRIORITY_SCHEDULING = -0x1 + _POSIX_THREAD_PROCESS_SHARED = -0x1 + _POSIX_THREAD_ROBUST_PRIO_INHERIT = -0x1 + _POSIX_THREAD_ROBUST_PRIO_PROTECT = -0x1 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x30db0 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x30db0 + _POSIX_TIMERS = -0x1 + _POSIX_TIMEOUTS = 0x30db0 + _POSIX_TRACE = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = -0x1 + _POSIX_V7_ILP32_OFFBIG = 0x0 + _POSIX_V7_LP64_OFF64 = 0x0 + _POSIX_V7_LPBIG_OFFBIG = 0x0 + + _POSIX_V6_ILP32_OFF32 = -0x1 + _POSIX_V6_ILP32_OFFBIG = 0x0 + _POSIX_V6_LP64_OFF64 = 0x0 + _POSIX_V6_LPBIG_OFFBIG = 0x0 + + _POSIX2_C_BIND = 0x30db0 + _POSIX2_C_DEV = -0x1 + _POSIX2_CHAR_TERM = 0x1 + _POSIX2_LOCALEDEF = -0x1 + _POSIX2_PBS = -0x1 + _POSIX2_SW_DEV = 0x30db0 + _POSIX2_UPE = 0x30db0 + _POSIX2_VERSION = 0x31069 + + _XOPEN_CRYPT = 0x1 + _XOPEN_ENH_I18N = -0x1 + _XOPEN_REALTIME = -0x1 + _XOPEN_REALTIME_THREADS = -0x1 + _XOPEN_SHM = 0x1 + _XOPEN_STREAMS = -0x1 + _XOPEN_UNIX = -0x1 + _XOPEN_UUCP = -0x1 + + _FOPEN_MAX = 0x14 + _NAME_MAX = 0xff + _RE_DUP_MAX = 0xff + + _BC_BASE_MAX = 0x7fffffff + _BC_DIM_MAX = 0xffff + _BC_SCALE_MAX = 0x7fffffff + _BC_STRING_MAX = 0x7fffffff + _COLL_WEIGHTS_MAX = 0x2 + _EXPR_NEST_MAX = 0x20 + _LINE_MAX = 0x800 + + _SHRT_MAX = 0x7fff + + 
_PATH_ZONEINFO = "/usr/share/zoneinfo" +) + +const ( + _CHAR_BIT = 0x8 + + _INT_MAX = 0x7fffffff + + sizeofOffT = 0x8 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go new file mode 100644 index 0000000000000..29b6f8746a1f0 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_solaris.go @@ -0,0 +1,139 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_defs_solaris.go + +//go:build solaris +// +build solaris + +package sysconf + +const ( + SC_AIO_LISTIO_MAX = 0x12 + SC_AIO_MAX = 0x13 + SC_AIO_PRIO_DELTA_MAX = 0x14 + SC_ARG_MAX = 0x1 + SC_ATEXIT_MAX = 0x4c + SC_BC_BASE_MAX = 0x36 + SC_BC_DIM_MAX = 0x37 + SC_BC_SCALE_MAX = 0x38 + SC_BC_STRING_MAX = 0x39 + SC_CHILD_MAX = 0x2 + SC_CLK_TCK = 0x3 + SC_COLL_WEIGHTS_MAX = 0x3a + SC_DELAYTIMER_MAX = 0x16 + SC_EXPR_NEST_MAX = 0x3b + SC_GETGR_R_SIZE_MAX = 0x239 + SC_GETPW_R_SIZE_MAX = 0x23a + SC_HOST_NAME_MAX = 0x2df + SC_IOV_MAX = 0x4d + SC_LINE_MAX = 0x3c + SC_LOGIN_NAME_MAX = 0x23b + SC_MQ_OPEN_MAX = 0x1d + SC_MQ_PRIO_MAX = 0x1e + SC_NGROUPS_MAX = 0x4 + SC_OPEN_MAX = 0x5 + SC_PAGE_SIZE = 0xb + SC_PAGESIZE = 0xb + SC_THREAD_DESTRUCTOR_ITERATIONS = 0x238 + SC_THREAD_KEYS_MAX = 0x23c + SC_THREAD_STACK_MIN = 0x23d + SC_THREAD_THREADS_MAX = 0x23e + SC_RE_DUP_MAX = 0x3d + SC_RTSIG_MAX = 0x22 + SC_SEM_NSEMS_MAX = 0x24 + SC_SEM_VALUE_MAX = 0x25 + SC_SIGQUEUE_MAX = 0x27 + SC_STREAM_MAX = 0x10 + SC_SYMLOOP_MAX = 0x2e8 + SC_TIMER_MAX = 0x2c + SC_TTY_NAME_MAX = 0x23f + SC_TZNAME_MAX = 0x11 + + SC_ADVISORY_INFO = 0x2db + SC_ASYNCHRONOUS_IO = 0x15 + SC_BARRIERS = 0x2dc + SC_CLOCK_SELECTION = 0x2dd + SC_CPUTIME = 0x2de + SC_FSYNC = 0x17 + SC_IPV6 = 0x2fa + SC_JOB_CONTROL = 0x6 + SC_MAPPED_FILES = 0x18 + SC_MEMLOCK = 0x19 + SC_MEMLOCK_RANGE = 0x1a + SC_MEMORY_PROTECTION = 0x1b + SC_MESSAGE_PASSING = 0x1c + SC_MONOTONIC_CLOCK = 0x2e0 + SC_PRIORITIZED_IO = 0x1f + SC_PRIORITY_SCHEDULING = 0x20 + SC_RAW_SOCKETS = 0x2fb + SC_READER_WRITER_LOCKS = 0x2e1 + SC_REALTIME_SIGNALS = 0x21 + SC_REGEXP = 0x2e2 + SC_SAVED_IDS = 0x7 + SC_SEMAPHORES = 0x23 + SC_SHARED_MEMORY_OBJECTS = 0x26 + SC_SHELL = 0x2e3 + SC_SPAWN = 0x2e4 + SC_SPIN_LOCKS = 0x2e5 + SC_SPORADIC_SERVER = 0x2e6 + SC_SS_REPL_MAX = 0x2e7 + SC_SYNCHRONIZED_IO = 0x2a + SC_THREAD_ATTR_STACKADDR = 0x241 + SC_THREAD_ATTR_STACKSIZE = 0x242 + SC_THREAD_CPUTIME = 0x2e9 + SC_THREAD_PRIO_INHERIT = 0x244 + SC_THREAD_PRIO_PROTECT = 0x245 + SC_THREAD_PRIORITY_SCHEDULING = 0x243 + SC_THREAD_PROCESS_SHARED = 0x246 + SC_THREAD_SAFE_FUNCTIONS = 0x247 + SC_THREAD_SPORADIC_SERVER = 0x2ea + SC_THREADS = 0x240 + SC_TIMEOUTS = 0x2eb + SC_TIMERS = 0x2b + SC_TRACE = 0x2ec + SC_TRACE_EVENT_FILTER = 0x2ed + SC_TRACE_EVENT_NAME_MAX = 0x2ee + SC_TRACE_INHERIT = 0x2ef + SC_TRACE_LOG = 0x2f0 + SC_TRACE_NAME_MAX = 0x2f1 + SC_TRACE_SYS_MAX = 0x2f2 + SC_TRACE_USER_EVENT_MAX = 0x2f3 + SC_TYPED_MEMORY_OBJECTS = 0x2f4 + SC_VERSION = 0x8 + + SC_V6_ILP32_OFF32 = 0x2f5 + SC_V6_ILP32_OFFBIG = 0x2f6 + SC_V6_LP64_OFF64 = 0x2f7 + SC_V6_LPBIG_OFFBIG = 0x2f8 + + SC_2_C_BIND = 0x2d + SC_2_C_DEV = 0x2e + SC_2_C_VERSION = 0x2f + SC_2_CHAR_TERM = 0x42 + SC_2_FORT_DEV = 0x30 + SC_2_FORT_RUN = 0x31 + SC_2_LOCALEDEF = 0x32 + SC_2_PBS = 0x2d4 + SC_2_PBS_ACCOUNTING = 0x2d5 + SC_2_PBS_CHECKPOINT = 0x2d6 + SC_2_PBS_LOCATE = 0x2d8 + SC_2_PBS_MESSAGE = 0x2d9 + SC_2_PBS_TRACK = 0x2da + SC_2_SW_DEV = 0x33 + SC_2_UPE = 0x34 + SC_2_VERSION = 0x35 + + SC_XOPEN_CRYPT = 0x3e + SC_XOPEN_ENH_I18N = 0x3f + SC_XOPEN_REALTIME = 0x2ce + 
SC_XOPEN_REALTIME_THREADS = 0x2cf + SC_XOPEN_SHM = 0x40 + SC_XOPEN_STREAMS = 0x2f9 + SC_XOPEN_UNIX = 0x4e + SC_XOPEN_VERSION = 0xc + SC_XOPEN_XCU_VERSION = 0x43 + + SC_PHYS_PAGES = 0x1f4 + SC_AVPHYS_PAGES = 0x1f5 + SC_NPROCESSORS_CONF = 0xe + SC_NPROCESSORS_ONLN = 0xf +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go new file mode 100644 index 0000000000000..478fe63a98e64 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_386.go @@ -0,0 +1,12 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_freebsd.go + +//go:build freebsd && 386 +// +build freebsd,386 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffff + _SHRT_MAX = 0x7fff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go new file mode 100644 index 0000000000000..7f58a4d8bac47 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_amd64.go @@ -0,0 +1,12 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_freebsd.go + +//go:build freebsd && amd64 +// +build freebsd,amd64 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffffffffffff + _SHRT_MAX = 0x7fff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go new file mode 100644 index 0000000000000..deb47595ba437 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm.go @@ -0,0 +1,12 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_freebsd.go + +//go:build freebsd && arm +// +build freebsd,arm + +package sysconf + +const ( + _LONG_MAX = 0x7fffffff + _SHRT_MAX = 0x7fff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go new file mode 100644 index 0000000000000..556ba3da2120a --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_arm64.go @@ -0,0 +1,12 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_freebsd.go + +//go:build freebsd && arm64 +// +build freebsd,arm64 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffffffffffff + _SHRT_MAX = 0x7fff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go new file mode 100644 index 0000000000000..b7cff760b16c9 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_freebsd_riscv64.go @@ -0,0 +1,12 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_freebsd.go + +//go:build freebsd && riscv64 +// +build freebsd,riscv64 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffffffffffff + _SHRT_MAX = 0x7fff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go new file mode 100644 index 0000000000000..16ee7ea64cfbc --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_386.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && 386 +// +build linux,386 + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x4000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = 0x1 + _POSIX_V7_ILP32_OFFBIG = 0x1 + _POSIX_V7_LP64_OFF64 = -0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = 0x1 + _POSIX_V6_ILP32_OFFBIG = 0x1 + _POSIX_V6_LP64_OFF64 = -0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go new file mode 100644 index 0000000000000..39aee349f2168 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_amd64.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && amd64 +// +build linux,amd64 + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x4000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = -0x1 + _POSIX_V7_ILP32_OFFBIG = -0x1 + _POSIX_V7_LP64_OFF64 = 0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = -0x1 + _POSIX_V6_ILP32_OFFBIG = -0x1 + _POSIX_V6_LP64_OFF64 = 0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go new file mode 100644 index 0000000000000..2e401164e3fce --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && arm +// +build linux,arm + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x4000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = 0x1 + _POSIX_V7_ILP32_OFFBIG = 0x1 + _POSIX_V7_LP64_OFF64 = -0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = 0x1 + _POSIX_V6_ILP32_OFFBIG = 0x1 + _POSIX_V6_LP64_OFF64 = -0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go new file mode 100644 index 0000000000000..362403abccd5f --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_arm64.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && arm64 +// +build linux,arm64 + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x20000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = -0x1 + _POSIX_V7_ILP32_OFFBIG = -0x1 + _POSIX_V7_LP64_OFF64 = 0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = -0x1 + _POSIX_V6_ILP32_OFFBIG = -0x1 + _POSIX_V6_LP64_OFF64 = 0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go new file mode 100644 index 0000000000000..95a71f4a2c71c --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_loong64.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && loong64 +// +build linux,loong64 + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x20000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = -0x1 + _POSIX_V7_ILP32_OFFBIG = -0x1 + _POSIX_V7_LP64_OFF64 = 0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = -0x1 + _POSIX_V6_ILP32_OFFBIG = -0x1 + _POSIX_V6_LP64_OFF64 = 0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go new file mode 100644 index 0000000000000..868b0ffb33669 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && mips +// +build linux,mips + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x20000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = 0x1 + _POSIX_V7_ILP32_OFFBIG = 0x1 + _POSIX_V7_LP64_OFF64 = -0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = 0x1 + _POSIX_V6_ILP32_OFFBIG = 0x1 + _POSIX_V6_LP64_OFF64 = -0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go new file mode 100644 index 0000000000000..5949f3d71fdfe --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && mips64 +// +build linux,mips64 + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x20000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = -0x1 + _POSIX_V7_ILP32_OFFBIG = -0x1 + _POSIX_V7_LP64_OFF64 = 0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = -0x1 + _POSIX_V6_ILP32_OFFBIG = -0x1 + _POSIX_V6_LP64_OFF64 = 0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go new file mode 100644 index 0000000000000..1853419a32d0f --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mips64le.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && mips64le +// +build linux,mips64le + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x20000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = -0x1 + _POSIX_V7_ILP32_OFFBIG = -0x1 + _POSIX_V7_LP64_OFF64 = 0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = -0x1 + _POSIX_V6_ILP32_OFFBIG = -0x1 + _POSIX_V6_LP64_OFF64 = 0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go new file mode 100644 index 0000000000000..ff41b3469bb6e --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_mipsle.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && mipsle +// +build linux,mipsle + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x20000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = 0x1 + _POSIX_V7_ILP32_OFFBIG = 0x1 + _POSIX_V7_LP64_OFF64 = -0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = 0x1 + _POSIX_V6_ILP32_OFFBIG = 0x1 + _POSIX_V6_LP64_OFF64 = -0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go new file mode 100644 index 0000000000000..388743728ac67 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && ppc64 +// +build linux,ppc64 + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x20000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = -0x1 + _POSIX_V7_ILP32_OFFBIG = -0x1 + _POSIX_V7_LP64_OFF64 = 0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = -0x1 + _POSIX_V6_ILP32_OFFBIG = -0x1 + _POSIX_V6_LP64_OFF64 = 0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go new file mode 100644 index 0000000000000..6d76929a64c68 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_ppc64le.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && ppc64le +// +build linux,ppc64le + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x20000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = -0x1 + _POSIX_V7_ILP32_OFFBIG = -0x1 + _POSIX_V7_LP64_OFF64 = 0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = -0x1 + _POSIX_V6_ILP32_OFFBIG = -0x1 + _POSIX_V6_LP64_OFF64 = 0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go new file mode 100644 index 0000000000000..3d7d71b322170 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_riscv64.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && riscv64 +// +build linux,riscv64 + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x4000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = -0x1 + _POSIX_V7_ILP32_OFFBIG = -0x1 + _POSIX_V7_LP64_OFF64 = 0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = -0x1 + _POSIX_V6_ILP32_OFFBIG = -0x1 + _POSIX_V6_LP64_OFF64 = 0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go new file mode 100644 index 0000000000000..9cf8529f53acc --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_linux_s390x.go @@ -0,0 +1,114 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_linux.go + +//go:build linux && s390x +// +build linux,s390x + +package sysconf + +const ( + _AIO_PRIO_DELTA_MAX = 0x14 + _BC_BASE_MAX = 0x63 + _BC_DIM_MAX = 0x800 + _BC_SCALE_MAX = 0x63 + _BC_STRING_MAX = 0x3e8 + _COLL_WEIGHTS_MAX = 0xff + _DELAYTIMER_MAX = 0x7fffffff + _EXPR_NEST_MAX = 0x20 + _HOST_NAME_MAX = 0x40 + _LINE_MAX = 0x800 + _LOGIN_NAME_MAX = 0x100 + _MQ_PRIO_MAX = 0x8000 + _NGROUPS_MAX = 0x10000 + _NSS_BUFLEN_GROUP = 0x400 + _NSS_BUFLEN_PASSWD = 0x400 + _OPEN_MAX = 0x100 + _PTHREAD_KEYS_MAX = 0x400 + _PTHREAD_STACK_MIN = 0x4000 + _RE_DUP_MAX = 0x7fff + _RTSIG_MAX = 0x20 + _SEM_VALUE_MAX = 0x7fffffff + _STREAM_MAX = 0x10 + _SYMLOOP_MAX = -0x1 + _TTY_NAME_MAX = 0x20 + + _UIO_MAXIOV = 0x400 + + _INT_MAX = 0x7fffffff + + _POSIX_ADVISORY_INFO = 0x31069 + _POSIX_ARG_MAX = 0x1000 + _POSIX_ASYNCHRONOUS_IO = 0x31069 + _POSIX_BARRIERS = 0x31069 + _POSIX_CHILD_MAX = 0x19 + _POSIX_CLOCK_SELECTION = 0x31069 + _POSIX_CPUTIME = 0x0 + _POSIX_FSYNC = 0x31069 + _POSIX_IPV6 = 0x31069 + _POSIX_JOB_CONTROL = 0x1 + _POSIX_MAPPED_FILES = 0x31069 + _POSIX_MEMLOCK = 0x31069 + _POSIX_MEMLOCK_RANGE = 0x31069 + _POSIX_MEMORY_PROTECTION = 0x31069 + _POSIX_MESSAGE_PASSING = 0x31069 + _POSIX_MONOTONIC_CLOCK = 0x0 + _POSIX_PRIORITIZED_IO = 0x31069 + _POSIX_PRIORITY_SCHEDULING = 0x31069 + _POSIX_RAW_SOCKETS = 0x31069 + _POSIX_READER_WRITER_LOCKS = 0x31069 + _POSIX_REALTIME_SIGNALS = 0x31069 + _POSIX_REGEXP = 0x1 + _POSIX_SAVED_IDS = 0x1 + _POSIX_SEMAPHORES = 0x31069 + _POSIX_SHARED_MEMORY_OBJECTS = 0x31069 + _POSIX_SHELL = 0x1 + _POSIX_SIGQUEUE_MAX = 0x20 + _POSIX_SPAWN = 0x31069 + _POSIX_SPIN_LOCKS = 0x31069 + _POSIX_SPORADIC_SERVER = -0x1 + _POSIX_SYNCHRONIZED_IO = 0x31069 + _POSIX_THREAD_ATTR_STACKADDR = 0x31069 + _POSIX_THREAD_ATTR_STACKSIZE = 0x31069 + _POSIX_THREAD_DESTRUCTOR_ITERATIONS = 0x4 + _POSIX_THREAD_PRIO_INHERIT = 0x31069 + _POSIX_THREAD_PRIO_PROTECT = 0x31069 + _POSIX_THREAD_PRIORITY_SCHEDULING = 0x31069 + _POSIX_THREAD_PROCESS_SHARED = 0x31069 + _POSIX_THREAD_SAFE_FUNCTIONS = 0x31069 + _POSIX_THREAD_SPORADIC_SERVER = -0x1 + _POSIX_THREADS = 0x31069 + _POSIX_TIMEOUTS = 0x31069 + _POSIX_TIMERS = 0x31069 + _POSIX_TRACE = -0x1 + _POSIX_TRACE_EVENT_FILTER = -0x1 + _POSIX_TRACE_INHERIT = -0x1 + _POSIX_TRACE_LOG = -0x1 + _POSIX_TYPED_MEMORY_OBJECTS = -0x1 + _POSIX_VERSION = 0x31069 + + _POSIX_V7_ILP32_OFF32 = -0x1 + _POSIX_V7_ILP32_OFFBIG = -0x1 + _POSIX_V7_LP64_OFF64 = 0x1 + _POSIX_V7_LPBIG_OFFBIG = -0x1 + + _POSIX_V6_ILP32_OFF32 = -0x1 + _POSIX_V6_ILP32_OFFBIG = -0x1 + _POSIX_V6_LP64_OFF64 = 0x1 + _POSIX_V6_LPBIG_OFFBIG = -0x1 + + _POSIX2_C_BIND = 0x31069 + _POSIX2_C_DEV = 0x31069 + _POSIX2_C_VERSION = 0x31069 + _POSIX2_CHAR_TERM = 0x31069 + _POSIX2_LOCALEDEF = 0x31069 + _POSIX2_SW_DEV = 0x31069 + _POSIX2_VERSION = 0x31069 + + _XOPEN_ENH_I18N = 0x1 + _XOPEN_REALTIME = 0x1 + _XOPEN_REALTIME_THREADS = 0x1 + _XOPEN_SHM = 0x1 + _XOPEN_UNIX = 0x1 + _XOPEN_VERSION = 0x2bc + _XOPEN_XCU_VERSION = 0x4 +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go new file mode 100644 index 0000000000000..3cd64dd6626cd --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_386.go @@ -0,0 +1,11 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs sysconf_values_netbsd.go + +//go:build netbsd && 386 +// +build netbsd,386 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go new file mode 100644 index 0000000000000..02fc1d0ef935b --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_amd64.go @@ -0,0 +1,11 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_netbsd.go + +//go:build netbsd && amd64 +// +build netbsd,amd64 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffffffffffff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go new file mode 100644 index 0000000000000..16f9b6e71eb4c --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm.go @@ -0,0 +1,11 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_netbsd.go + +//go:build netbsd && arm +// +build netbsd,arm + +package sysconf + +const ( + _LONG_MAX = 0x7fffffff +) diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go new file mode 100644 index 0000000000000..e530339ca7e42 --- /dev/null +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_values_netbsd_arm64.go @@ -0,0 +1,11 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs sysconf_values_netbsd.go + +//go:build netbsd && arm64 +// +build netbsd,arm64 + +package sysconf + +const ( + _LONG_MAX = 0x7fffffffffffffff +) diff --git a/vendor/github.com/tklauser/numcpus/.cirrus.yml b/vendor/github.com/tklauser/numcpus/.cirrus.yml new file mode 100644 index 0000000000000..69c6ced5c75ed --- /dev/null +++ b/vendor/github.com/tklauser/numcpus/.cirrus.yml @@ -0,0 +1,13 @@ +env: + CIRRUS_CLONE_DEPTH: 1 + GO_VERSION: go1.20 + +freebsd_12_task: + freebsd_instance: + image_family: freebsd-12-3 + install_script: | + pkg install -y go + GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest + bin/${GO_VERSION} download + build_script: bin/${GO_VERSION} build -buildvcs=false -v ./... + test_script: bin/${GO_VERSION} test -buildvcs=false -race ./... diff --git a/vendor/github.com/tklauser/numcpus/LICENSE b/vendor/github.com/tklauser/numcpus/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/github.com/tklauser/numcpus/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/tklauser/numcpus/README.md b/vendor/github.com/tklauser/numcpus/README.md new file mode 100644 index 0000000000000..23612c5418848 --- /dev/null +++ b/vendor/github.com/tklauser/numcpus/README.md @@ -0,0 +1,52 @@ +# numcpus + +[![Go Reference](https://pkg.go.dev/badge/github.com/tklauser/numcpus.svg)](https://pkg.go.dev/github.com/tklauser/numcpus) +[![GitHub Action Status](https://github.com/tklauser/numcpus/workflows/Tests/badge.svg)](https://github.com/tklauser/numcpus/actions?query=workflow%3ATests) + +Package numcpus provides information about the number of CPUs in the system. + +It gets the number of CPUs (online, offline, present, possible, configured or +kernel maximum) on Linux, Darwin, FreeBSD, NetBSD, OpenBSD, DragonflyBSD or +Solaris/Illumos systems. + +On Linux, the information is retrieved by reading the corresponding CPU +topology files in `/sys/devices/system/cpu`. + +On BSD systems, the information is retrieved using the `hw.ncpu` and +`hw.ncpuonline` sysctls, if supported. + +Not all functions are supported on Darwin, FreeBSD, NetBSD, OpenBSD, +DragonflyBSD and Solaris/Illumos. ErrNotSupported is returned in case a +function is not supported on a particular platform. + +## Usage + +```Go +package main + +import ( + "fmt" + "os" + + "github.com/tklauser/numcpus" +) + +func main() { + online, err := numcpus.GetOnline() + if err != nil { + fmt.Fprintf(os.Stderr, "GetOnline: %v\n", err) + } + fmt.Printf("online CPUs: %v\n", online) + + possible, err := numcpus.GetPossible() + if err != nil { + fmt.Fprintf(os.Stderr, "GetPossible: %v\n", err) + } + fmt.Printf("possible CPUs: %v\n", possible) +} +``` + +## References + +* [Linux kernel sysfs documentation for CPU attributes](https://www.kernel.org/doc/Documentation/ABI/testing/sysfs-devices-system-cpu) +* [Linux kernel CPU topology documentation](https://www.kernel.org/doc/Documentation/cputopology.txt) diff --git a/vendor/github.com/tklauser/numcpus/numcpus.go b/vendor/github.com/tklauser/numcpus/numcpus.go new file mode 100644 index 0000000000000..af59983e7d347 --- /dev/null +++ b/vendor/github.com/tklauser/numcpus/numcpus.go @@ -0,0 +1,75 @@ +// Copyright 2018-2022 Tobias Klauser +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package numcpus provides information about the number of CPUs in the system. +// +// It gets the number of CPUs (online, offline, present, possible or kernel +// maximum) on Linux, Darwin, FreeBSD, NetBSD, OpenBSD, DragonflyBSD, +// Solaris/Illumos or Windows systems. +// +// On Linux, the information is retrieved by reading the corresponding CPU +// topology files in /sys/devices/system/cpu. +// +// On BSD systems, the information is retrieved using the hw.ncpu and +// hw.ncpuonline sysctls, if supported. +// +// On Windows systems, the information is retrieved using the +// GetActiveProcessorCount and GetMaximumProcessorCount functions, respectively. 
+// +// Not all functions are supported on Darwin, FreeBSD, NetBSD, OpenBSD, +// DragonflyBSD, Solaris/Illumos and Windows. ErrNotSupported is returned in +// case a function is not supported on a particular platform. +package numcpus + +import "errors" + +// ErrNotSupported is the error returned when the function is not supported. +var ErrNotSupported = errors.New("function not supported") + +// GetConfigured returns the number of CPUs configured on the system. This +// function should return the same value as `getconf _SC_NPROCESSORS_CONF` on a +// unix system. +func GetConfigured() (int, error) { + return getConfigured() +} + +// GetKernelMax returns the maximum number of CPUs allowed by the kernel +// configuration. This function is only supported on Linux and Windows systems. +func GetKernelMax() (int, error) { + return getKernelMax() +} + +// GetOffline returns the number of offline CPUs, i.e. CPUs that are not online +// because they have been hotplugged off or exceed the limit of CPUs allowed by +// the kernel configuration (see GetKernelMax). This function is only supported +// on Linux systems. +func GetOffline() (int, error) { + return getOffline() +} + +// GetOnline returns the number of CPUs that are online and being scheduled. +func GetOnline() (int, error) { + return getOnline() +} + +// GetPossible returns the number of possible CPUs, i.e. CPUs that +// have been allocated resources and can be brought online if they are present. +func GetPossible() (int, error) { + return getPossible() +} + +// GetPresent returns the number of CPUs present in the system. +func GetPresent() (int, error) { + return getPresent() +} diff --git a/vendor/github.com/tklauser/numcpus/numcpus_bsd.go b/vendor/github.com/tklauser/numcpus/numcpus_bsd.go new file mode 100644 index 0000000000000..9e77e38e6e026 --- /dev/null +++ b/vendor/github.com/tklauser/numcpus/numcpus_bsd.go @@ -0,0 +1,66 @@ +// Copyright 2018 Tobias Klauser +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build darwin || dragonfly || freebsd || netbsd || openbsd +// +build darwin dragonfly freebsd netbsd openbsd + +package numcpus + +import ( + "runtime" + + "golang.org/x/sys/unix" +) + +func getConfigured() (int, error) { + n, err := unix.SysctlUint32("hw.ncpu") + return int(n), err +} + +func getKernelMax() (int, error) { + if runtime.GOOS == "freebsd" { + n, err := unix.SysctlUint32("kern.smp.maxcpus") + return int(n), err + } + return 0, ErrNotSupported +} + +func getOffline() (int, error) { + return 0, ErrNotSupported +} + +func getOnline() (int, error) { + var n uint32 + var err error + switch runtime.GOOS { + case "netbsd", "openbsd": + n, err = unix.SysctlUint32("hw.ncpuonline") + if err != nil { + n, err = unix.SysctlUint32("hw.ncpu") + } + default: + n, err = unix.SysctlUint32("hw.ncpu") + } + return int(n), err +} + +func getPossible() (int, error) { + n, err := unix.SysctlUint32("hw.ncpu") + return int(n), err +} + +func getPresent() (int, error) { + n, err := unix.SysctlUint32("hw.ncpu") + return int(n), err +} diff --git a/vendor/github.com/tklauser/numcpus/numcpus_linux.go b/vendor/github.com/tklauser/numcpus/numcpus_linux.go new file mode 100644 index 0000000000000..1a30525b8734f --- /dev/null +++ b/vendor/github.com/tklauser/numcpus/numcpus_linux.go @@ -0,0 +1,120 @@ +// Copyright 2018 Tobias Klauser +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package numcpus + +import ( + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "golang.org/x/sys/unix" +) + +const sysfsCPUBasePath = "/sys/devices/system/cpu" + +func getFromCPUAffinity() (int, error) { + var cpuSet unix.CPUSet + if err := unix.SchedGetaffinity(0, &cpuSet); err != nil { + return 0, err + } + return cpuSet.Count(), nil +} + +func readCPURange(file string) (int, error) { + buf, err := ioutil.ReadFile(filepath.Join(sysfsCPUBasePath, file)) + if err != nil { + return 0, err + } + return parseCPURange(strings.Trim(string(buf), "\n ")) +} + +func parseCPURange(cpus string) (int, error) { + n := int(0) + for _, cpuRange := range strings.Split(cpus, ",") { + if len(cpuRange) == 0 { + continue + } + rangeOp := strings.SplitN(cpuRange, "-", 2) + first, err := strconv.ParseUint(rangeOp[0], 10, 32) + if err != nil { + return 0, err + } + if len(rangeOp) == 1 { + n++ + continue + } + last, err := strconv.ParseUint(rangeOp[1], 10, 32) + if err != nil { + return 0, err + } + n += int(last - first + 1) + } + return n, nil +} + +func getConfigured() (int, error) { + d, err := os.Open(sysfsCPUBasePath) + if err != nil { + return 0, err + } + defer d.Close() + fis, err := d.Readdir(-1) + if err != nil { + return 0, err + } + count := 0 + for _, fi := range fis { + if name := fi.Name(); fi.IsDir() && strings.HasPrefix(name, "cpu") { + _, err := strconv.ParseInt(name[3:], 10, 64) + if err == nil { + count++ + } + } + } + return count, nil +} + +func getKernelMax() (int, error) { + buf, err := ioutil.ReadFile(filepath.Join(sysfsCPUBasePath, "kernel_max")) + if err != nil { + return 0, err + } + n, err := strconv.ParseInt(strings.Trim(string(buf), "\n "), 10, 32) + if err != nil { + return 0, err + } + return int(n), nil +} + +func getOffline() (int, error) { + return readCPURange("offline") +} + +func getOnline() (int, error) { + if n, err := getFromCPUAffinity(); err == nil { + return n, nil + } + return readCPURange("online") +} + +func getPossible() (int, error) { + return readCPURange("possible") +} + +func getPresent() (int, error) { + return readCPURange("present") +} diff --git a/vendor/github.com/tklauser/numcpus/numcpus_solaris.go b/vendor/github.com/tklauser/numcpus/numcpus_solaris.go new file mode 100644 index 0000000000000..a264323781bf0 --- /dev/null +++ b/vendor/github.com/tklauser/numcpus/numcpus_solaris.go @@ -0,0 +1,56 @@ +// Copyright 2021 Tobias Klauser +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build solaris +// +build solaris + +package numcpus + +import "golang.org/x/sys/unix" + +// taken from /usr/include/sys/unistd.h +const ( + _SC_NPROCESSORS_CONF = 14 + _SC_NPROCESSORS_ONLN = 15 + _SC_NPROCESSORS_MAX = 516 +) + +func getConfigured() (int, error) { + n, err := unix.Sysconf(_SC_NPROCESSORS_CONF) + return int(n), err +} + +func getKernelMax() (int, error) { + n, err := unix.Sysconf(_SC_NPROCESSORS_MAX) + return int(n), err +} + +func getOffline() (int, error) { + return 0, ErrNotSupported +} + +func getOnline() (int, error) { + n, err := unix.Sysconf(_SC_NPROCESSORS_ONLN) + return int(n), err +} + +func getPossible() (int, error) { + n, err := unix.Sysconf(_SC_NPROCESSORS_CONF) + return int(n), err +} + +func getPresent() (int, error) { + n, err := unix.Sysconf(_SC_NPROCESSORS_CONF) + return int(n), err +} diff --git a/vendor/github.com/tklauser/numcpus/numcpus_unsupported.go b/vendor/github.com/tklauser/numcpus/numcpus_unsupported.go new file mode 100644 index 0000000000000..4a0b7c43d215b --- /dev/null +++ b/vendor/github.com/tklauser/numcpus/numcpus_unsupported.go @@ -0,0 +1,42 @@ +// Copyright 2021 Tobias Klauser +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package numcpus + +func getConfigured() (int, error) { + return 0, ErrNotSupported +} + +func getKernelMax() (int, error) { + return 0, ErrNotSupported +} + +func getOffline() (int, error) { + return 0, ErrNotSupported +} + +func getOnline() (int, error) { + return 0, ErrNotSupported +} + +func getPossible() (int, error) { + return 0, ErrNotSupported +} + +func getPresent() (int, error) { + return 0, ErrNotSupported +} diff --git a/vendor/github.com/tklauser/numcpus/numcpus_windows.go b/vendor/github.com/tklauser/numcpus/numcpus_windows.go new file mode 100644 index 0000000000000..f7d5b40295512 --- /dev/null +++ b/vendor/github.com/tklauser/numcpus/numcpus_windows.go @@ -0,0 +1,41 @@ +// Copyright 2022 Tobias Klauser +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package numcpus + +import "golang.org/x/sys/windows" + +func getConfigured() (int, error) { + return int(windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS)), nil +} + +func getKernelMax() (int, error) { + return int(windows.GetMaximumProcessorCount(windows.ALL_PROCESSOR_GROUPS)), nil +} + +func getOffline() (int, error) { + return 0, ErrNotSupported +} + +func getOnline() (int, error) { + return int(windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS)), nil +} + +func getPossible() (int, error) { + return int(windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS)), nil +} + +func getPresent() (int, error) { + return int(windows.GetActiveProcessorCount(windows.ALL_PROCESSOR_GROUPS)), nil +} diff --git a/vendor/github.com/yusufpapurcu/wmi/LICENSE b/vendor/github.com/yusufpapurcu/wmi/LICENSE new file mode 100644 index 0000000000000..ae80b67209e2a --- /dev/null +++ b/vendor/github.com/yusufpapurcu/wmi/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Stack Exchange + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/yusufpapurcu/wmi/README.md b/vendor/github.com/yusufpapurcu/wmi/README.md new file mode 100644 index 0000000000000..426d1a46b4aa9 --- /dev/null +++ b/vendor/github.com/yusufpapurcu/wmi/README.md @@ -0,0 +1,6 @@ +wmi +=== + +Package wmi provides a WQL interface to Windows WMI. + +Note: It interfaces with WMI on the local machine, therefore it only runs on Windows. diff --git a/vendor/github.com/yusufpapurcu/wmi/swbemservices.go b/vendor/github.com/yusufpapurcu/wmi/swbemservices.go new file mode 100644 index 0000000000000..a250c846d5e28 --- /dev/null +++ b/vendor/github.com/yusufpapurcu/wmi/swbemservices.go @@ -0,0 +1,261 @@ +//go:build windows +// +build windows + +package wmi + +import ( + "fmt" + "reflect" + "runtime" + "sync" + + "github.com/go-ole/go-ole" + "github.com/go-ole/go-ole/oleutil" +) + +// SWbemServices is used to access wmi. See https://msdn.microsoft.com/en-us/library/aa393719(v=vs.85).aspx +type SWbemServices struct { + //TODO: track namespace. 
Not sure if we can re connect to a different namespace using the same instance + cWMIClient *Client //This could also be an embedded struct, but then we would need to branch on Client vs SWbemServices in the Query method + sWbemLocatorIUnknown *ole.IUnknown + sWbemLocatorIDispatch *ole.IDispatch + queries chan *queryRequest + closeError chan error + lQueryorClose sync.Mutex +} + +type queryRequest struct { + query string + dst interface{} + args []interface{} + finished chan error +} + +// InitializeSWbemServices will return a new SWbemServices object that can be used to query WMI +func InitializeSWbemServices(c *Client, connectServerArgs ...interface{}) (*SWbemServices, error) { + //fmt.Println("InitializeSWbemServices: Starting") + //TODO: implement connectServerArgs as optional argument for init with connectServer call + s := new(SWbemServices) + s.cWMIClient = c + s.queries = make(chan *queryRequest) + initError := make(chan error) + go s.process(initError) + + err, ok := <-initError + if ok { + return nil, err //Send error to caller + } + //fmt.Println("InitializeSWbemServices: Finished") + return s, nil +} + +// Close will clear and release all of the SWbemServices resources +func (s *SWbemServices) Close() error { + s.lQueryorClose.Lock() + if s == nil || s.sWbemLocatorIDispatch == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices is not Initialized") + } + if s.queries == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices has been closed") + } + //fmt.Println("Close: sending close request") + var result error + ce := make(chan error) + s.closeError = ce //Race condition if multiple callers to close. May need to lock here + close(s.queries) //Tell background to shut things down + s.lQueryorClose.Unlock() + err, ok := <-ce + if ok { + result = err + } + //fmt.Println("Close: finished") + return result +} + +func (s *SWbemServices) process(initError chan error) { + //fmt.Println("process: starting background thread initialization") + //All OLE/WMI calls must happen on the same initialized thead, so lock this goroutine + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err != nil { + oleCode := err.(*ole.OleError).Code() + if oleCode != ole.S_OK && oleCode != S_FALSE { + initError <- fmt.Errorf("ole.CoInitializeEx error: %v", err) + return + } + } + defer ole.CoUninitialize() + + unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator") + if err != nil { + initError <- fmt.Errorf("CreateObject SWbemLocator error: %v", err) + return + } else if unknown == nil { + initError <- ErrNilCreateObject + return + } + defer unknown.Release() + s.sWbemLocatorIUnknown = unknown + + dispatch, err := s.sWbemLocatorIUnknown.QueryInterface(ole.IID_IDispatch) + if err != nil { + initError <- fmt.Errorf("SWbemLocator QueryInterface error: %v", err) + return + } + defer dispatch.Release() + s.sWbemLocatorIDispatch = dispatch + + // we can't do the ConnectServer call outside the loop unless we find a way to track and re-init the connectServerArgs + //fmt.Println("process: initialized. 
closing initError") + close(initError) + //fmt.Println("process: waiting for queries") + for q := range s.queries { + //fmt.Printf("process: new query: len(query)=%d\n", len(q.query)) + errQuery := s.queryBackground(q) + //fmt.Println("process: s.queryBackground finished") + if errQuery != nil { + q.finished <- errQuery + } + close(q.finished) + } + //fmt.Println("process: queries channel closed") + s.queries = nil //set channel to nil so we know it is closed + //TODO: I think the Release/Clear calls can panic if things are in a bad state. + //TODO: May need to recover from panics and send error to method caller instead. + close(s.closeError) +} + +// Query runs the WQL query using a SWbemServices instance and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. +// Array types are not supported. +// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. +func (s *SWbemServices) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + s.lQueryorClose.Lock() + if s == nil || s.sWbemLocatorIDispatch == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices is not Initialized") + } + if s.queries == nil { + s.lQueryorClose.Unlock() + return fmt.Errorf("SWbemServices has been closed") + } + + //fmt.Println("Query: Sending query request") + qr := queryRequest{ + query: query, + dst: dst, + args: connectServerArgs, + finished: make(chan error), + } + s.queries <- &qr + s.lQueryorClose.Unlock() + err, ok := <-qr.finished + if ok { + //fmt.Println("Query: Finished with error") + return err //Send error to caller + } + //fmt.Println("Query: Finished") + return nil +} + +func (s *SWbemServices) queryBackground(q *queryRequest) error { + if s == nil || s.sWbemLocatorIDispatch == nil { + return fmt.Errorf("SWbemServices is not Initialized") + } + wmi := s.sWbemLocatorIDispatch //Should just rename in the code, but this will help as we break things apart + //fmt.Println("queryBackground: Starting") + + dv := reflect.ValueOf(q.dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType := checkMultiArg(dv) + if mat == multiArgTypeInvalid { + return ErrInvalidEntityType + } + + // service is a SWbemServices + serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", q.args...) 
+ if err != nil { + return err + } + service := serviceRaw.ToIDispatch() + defer serviceRaw.Clear() + + // result is a SWBemObjectSet + resultRaw, err := oleutil.CallMethod(service, "ExecQuery", q.query) + if err != nil { + return err + } + result := resultRaw.ToIDispatch() + defer resultRaw.Clear() + + count, err := oleInt64(result, "Count") + if err != nil { + return err + } + + enumProperty, err := result.GetProperty("_NewEnum") + if err != nil { + return err + } + defer enumProperty.Clear() + + enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + if enum == nil { + return fmt.Errorf("can't get IEnumVARIANT, enum is nil") + } + defer enum.Release() + + // Initialize a slice with Count capacity + dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) + + var errFieldMismatch error + for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { + if err != nil { + return err + } + + err := func() error { + // item is a SWbemObject, but really a Win32_Process + item := itemRaw.ToIDispatch() + defer item.Release() + + ev := reflect.New(elemType) + if err = s.cWMIClient.loadEntity(ev.Interface(), item); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + return nil + }() + if err != nil { + return err + } + } + //fmt.Println("queryBackground: Finished") + return errFieldMismatch +} diff --git a/vendor/github.com/yusufpapurcu/wmi/wmi.go b/vendor/github.com/yusufpapurcu/wmi/wmi.go new file mode 100644 index 0000000000000..03f386ed59cdc --- /dev/null +++ b/vendor/github.com/yusufpapurcu/wmi/wmi.go @@ -0,0 +1,603 @@ +//go:build windows +// +build windows + +/* +Package wmi provides a WQL interface for WMI on Windows. + +Example code to print names of running processes: + + type Win32_Process struct { + Name string + } + + func main() { + var dst []Win32_Process + q := wmi.CreateQuery(&dst, "") + err := wmi.Query(q, &dst) + if err != nil { + log.Fatal(err) + } + for i, v := range dst { + println(i, v.Name) + } + } +*/ +package wmi + +import ( + "bytes" + "errors" + "fmt" + "log" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-ole/go-ole" + "github.com/go-ole/go-ole/oleutil" +) + +var l = log.New(os.Stdout, "", log.LstdFlags) + +var ( + ErrInvalidEntityType = errors.New("wmi: invalid entity type") + // ErrNilCreateObject is the error returned if CreateObject returns nil even + // if the error was nil. + ErrNilCreateObject = errors.New("wmi: create object returned nil") + lock sync.Mutex +) + +// S_FALSE is returned by CoInitializeEx if it was already called on this thread. +const S_FALSE = 0x00000001 + +// QueryNamespace invokes Query with the given namespace on the local machine. +func QueryNamespace(query string, dst interface{}, namespace string) error { + return Query(query, dst, nil, namespace) +} + +// Query runs the WQL query and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. 
+// Array types are not supported. +// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver +// for details. +// +// Query is a wrapper around DefaultClient.Query. +func Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + if DefaultClient.SWbemServicesClient == nil { + return DefaultClient.Query(query, dst, connectServerArgs...) + } + return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...) +} + +// CallMethod calls a method named methodName on an instance of the class named +// className, with the given params. +// +// CallMethod is a wrapper around DefaultClient.CallMethod. +func CallMethod(connectServerArgs []interface{}, className, methodName string, params []interface{}) (int32, error) { + return DefaultClient.CallMethod(connectServerArgs, className, methodName, params) +} + +// A Client is an WMI query client. +// +// Its zero value (DefaultClient) is a usable client. +type Client struct { + // NonePtrZero specifies if nil values for fields which aren't pointers + // should be returned as the field types zero value. + // + // Setting this to true allows stucts without pointer fields to be used + // without the risk failure should a nil value returned from WMI. + NonePtrZero bool + + // PtrNil specifies if nil values for pointer fields should be returned + // as nil. + // + // Setting this to true will set pointer fields to nil where WMI + // returned nil, otherwise the types zero value will be returned. + PtrNil bool + + // AllowMissingFields specifies that struct fields not present in the + // query result should not result in an error. + // + // Setting this to true allows custom queries to be used with full + // struct definitions instead of having to define multiple structs. + AllowMissingFields bool + + // SWbemServiceClient is an optional SWbemServices object that can be + // initialized and then reused across multiple queries. If it is null + // then the method will initialize a new temporary client each time. + SWbemServicesClient *SWbemServices +} + +// DefaultClient is the default Client and is used by Query, QueryNamespace, and CallMethod. +var DefaultClient = &Client{} + +// coinitService coinitializes WMI service. If no error is returned, a cleanup function +// is returned which must be executed (usually deferred) to clean up allocated resources. 
+func (c *Client) coinitService(connectServerArgs ...interface{}) (*ole.IDispatch, func(), error) { + var unknown *ole.IUnknown + var wmi *ole.IDispatch + var serviceRaw *ole.VARIANT + + // be sure teardown happens in the reverse + // order from that which they were created + deferFn := func() { + if serviceRaw != nil { + serviceRaw.Clear() + } + if wmi != nil { + wmi.Release() + } + if unknown != nil { + unknown.Release() + } + ole.CoUninitialize() + } + + // if we error'ed here, clean up immediately + var err error + defer func() { + if err != nil { + deferFn() + } + }() + + err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err != nil { + oleCode := err.(*ole.OleError).Code() + if oleCode != ole.S_OK && oleCode != S_FALSE { + return nil, nil, err + } + } + + unknown, err = oleutil.CreateObject("WbemScripting.SWbemLocator") + if err != nil { + return nil, nil, err + } else if unknown == nil { + return nil, nil, ErrNilCreateObject + } + + wmi, err = unknown.QueryInterface(ole.IID_IDispatch) + if err != nil { + return nil, nil, err + } + + // service is a SWbemServices + serviceRaw, err = oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...) + if err != nil { + return nil, nil, err + } + + return serviceRaw.ToIDispatch(), deferFn, nil +} + +// CallMethod calls a WMI method named methodName on an instance +// of the class named className. It passes in the arguments given +// in params. Use connectServerArgs to customize the machine and +// namespace; by default, the local machine and default namespace +// are used. See +// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver +// for details. +func (c *Client) CallMethod(connectServerArgs []interface{}, className, methodName string, params []interface{}) (int32, error) { + service, cleanup, err := c.coinitService(connectServerArgs...) + if err != nil { + return 0, fmt.Errorf("coinit: %v", err) + } + defer cleanup() + + // Get class + classRaw, err := oleutil.CallMethod(service, "Get", className) + if err != nil { + return 0, fmt.Errorf("CallMethod Get class %s: %v", className, err) + } + class := classRaw.ToIDispatch() + defer classRaw.Clear() + + // Run method + resultRaw, err := oleutil.CallMethod(class, methodName, params...) + if err != nil { + return 0, fmt.Errorf("CallMethod %s.%s: %v", className, methodName, err) + } + resultInt, ok := resultRaw.Value().(int32) + if !ok { + return 0, fmt.Errorf("return value was not an int32: %v (%T)", resultRaw, resultRaw) + } + + return resultInt, nil +} + +// Query runs the WQL query and appends the values to dst. +// +// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in +// the query must have the same name in dst. Supported types are all signed and +// unsigned integers, time.Time, string, bool, or a pointer to one of those. +// Array types are not supported. +// +// By default, the local machine and default namespace are used. These can be +// changed using connectServerArgs. See +// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver +// for details. 
+func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { + dv := reflect.ValueOf(dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType := checkMultiArg(dv) + if mat == multiArgTypeInvalid { + return ErrInvalidEntityType + } + + lock.Lock() + defer lock.Unlock() + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + service, cleanup, err := c.coinitService(connectServerArgs...) + if err != nil { + return err + } + defer cleanup() + + // result is a SWBemObjectSet + resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query) + if err != nil { + return err + } + result := resultRaw.ToIDispatch() + defer resultRaw.Clear() + + count, err := oleInt64(result, "Count") + if err != nil { + return err + } + + enumProperty, err := result.GetProperty("_NewEnum") + if err != nil { + return err + } + defer enumProperty.Clear() + + enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) + if err != nil { + return err + } + if enum == nil { + return fmt.Errorf("can't get IEnumVARIANT, enum is nil") + } + defer enum.Release() + + // Initialize a slice with Count capacity + dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) + + var errFieldMismatch error + for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { + if err != nil { + return err + } + + err := func() error { + // item is a SWbemObject, but really a Win32_Process + item := itemRaw.ToIDispatch() + defer item.Release() + + ev := reflect.New(elemType) + if err = c.loadEntity(ev.Interface(), item); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. + errFieldMismatch = err + } else { + return err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + return nil + }() + if err != nil { + return err + } + } + return errFieldMismatch +} + +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. +// StructType is the type of the struct pointed to by the destination argument. +type ErrFieldMismatch struct { + StructType reflect.Type + FieldName string + Reason string +} + +func (e *ErrFieldMismatch) Error() string { + return fmt.Sprintf("wmi: cannot load field %q into a %q: %s", + e.FieldName, e.StructType, e.Reason) +} + +var timeType = reflect.TypeOf(time.Time{}) + +// loadEntity loads a SWbemObject into a struct pointer. 
+func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) { + v := reflect.ValueOf(dst).Elem() + for i := 0; i < v.NumField(); i++ { + f := v.Field(i) + of := f + isPtr := f.Kind() == reflect.Ptr + n := v.Type().Field(i).Name + if n[0] < 'A' || n[0] > 'Z' { + continue + } + if !f.CanSet() { + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "CanSet() is false", + } + } + prop, err := oleutil.GetProperty(src, n) + if err != nil { + if !c.AllowMissingFields { + errFieldMismatch = &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "no such struct field", + } + } + continue + } + defer prop.Clear() + + if isPtr && !(c.PtrNil && prop.VT == 0x1) { + ptr := reflect.New(f.Type().Elem()) + f.Set(ptr) + f = f.Elem() + } + + if prop.VT == 0x1 { //VT_NULL + continue + } + + switch val := prop.Value().(type) { + case int8, int16, int32, int64, int: + v := reflect.ValueOf(val).Int() + switch f.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + f.SetInt(v) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + f.SetUint(uint64(v)) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not an integer class", + } + } + case uint8, uint16, uint32, uint64: + v := reflect.ValueOf(val).Uint() + switch f.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + f.SetInt(int64(v)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + f.SetUint(v) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not an integer class", + } + } + case string: + switch f.Kind() { + case reflect.String: + f.SetString(val) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + iv, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return err + } + f.SetInt(iv) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + uv, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return err + } + f.SetUint(uv) + case reflect.Struct: + switch f.Type() { + case timeType: + if len(val) == 25 { + mins, err := strconv.Atoi(val[22:]) + if err != nil { + return err + } + val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60) + } + t, err := time.Parse("20060102150405.000000-0700", val) + if err != nil { + return err + } + f.Set(reflect.ValueOf(t)) + } + } + case bool: + switch f.Kind() { + case reflect.Bool: + f.SetBool(val) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a bool", + } + } + case float32: + switch f.Kind() { + case reflect.Float32: + f.SetFloat(float64(val)) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a Float32", + } + } + case float64: + switch f.Kind() { + case reflect.Float32, reflect.Float64: + f.SetFloat(val) + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: "not a Float64", + } + } + + default: + if f.Kind() == reflect.Slice { + switch f.Type().Elem().Kind() { + case reflect.String: + safeArray := prop.ToArray() + if safeArray != nil { + arr := safeArray.ToValueArray() + fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) + for i, v := range arr { + s := fArr.Index(i) + s.SetString(v.(string)) + } + f.Set(fArr) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + safeArray := prop.ToArray() + if 
safeArray != nil { + arr := safeArray.ToValueArray() + fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) + for i, v := range arr { + s := fArr.Index(i) + s.SetUint(reflect.ValueOf(v).Uint()) + } + f.Set(fArr) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + safeArray := prop.ToArray() + if safeArray != nil { + arr := safeArray.ToValueArray() + fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) + for i, v := range arr { + s := fArr.Index(i) + s.SetInt(reflect.ValueOf(v).Int()) + } + f.Set(fArr) + } + default: + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: fmt.Sprintf("unsupported slice type (%T)", val), + } + } + } else { + typeof := reflect.TypeOf(val) + if typeof == nil && (isPtr || c.NonePtrZero) { + if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) { + of.Set(reflect.Zero(of.Type())) + } + break + } + return &ErrFieldMismatch{ + StructType: of.Type(), + FieldName: n, + Reason: fmt.Sprintf("unsupported type (%T)", val), + } + } + } + } + return errFieldMismatch +} + +type multiArgType int + +const ( + multiArgTypeInvalid multiArgType = iota + multiArgTypeStruct + multiArgTypeStructPtr +) + +// checkMultiArg checks that v has type []S, []*S for some struct type S. +// +// It returns what category the slice's elements are, and the reflect.Type +// that represents S. +func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { + if v.Kind() != reflect.Slice { + return multiArgTypeInvalid, nil + } + elemType = v.Type().Elem() + switch elemType.Kind() { + case reflect.Struct: + return multiArgTypeStruct, elemType + case reflect.Ptr: + elemType = elemType.Elem() + if elemType.Kind() == reflect.Struct { + return multiArgTypeStructPtr, elemType + } + } + return multiArgTypeInvalid, nil +} + +func oleInt64(item *ole.IDispatch, prop string) (int64, error) { + v, err := oleutil.GetProperty(item, prop) + if err != nil { + return 0, err + } + defer v.Clear() + + i := int64(v.Val) + return i, nil +} + +// CreateQuery returns a WQL query string that queries all columns of src. where +// is an optional string that is appended to the query, to be used with WHERE +// clauses. In such a case, the "WHERE" string should appear at the beginning. +// The wmi class is obtained by the name of the type. You can pass a optional +// class throught the variadic class parameter which is useful for anonymous +// structs. 
+func CreateQuery(src interface{}, where string, class ...string) string { + var b bytes.Buffer + b.WriteString("SELECT ") + s := reflect.Indirect(reflect.ValueOf(src)) + t := s.Type() + if s.Kind() == reflect.Slice { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return "" + } + var fields []string + for i := 0; i < t.NumField(); i++ { + fields = append(fields, t.Field(i).Name) + } + b.WriteString(strings.Join(fields, ", ")) + b.WriteString(" FROM ") + if len(class) > 0 { + b.WriteString(class[0]) + } else { + b.WriteString(t.Name()) + } + b.WriteString(" " + where) + return b.String() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f0152af75d4c2..090bcd5468266 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -729,6 +729,10 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr +# github.com/go-ole/go-ole v1.2.6 +## explicit; go 1.12 +github.com/go-ole/go-ole +github.com/go-ole/go-ole/oleutil # github.com/go-openapi/analysis v0.22.2 ## explicit; go 1.19 github.com/go-openapi/analysis @@ -1175,6 +1179,9 @@ github.com/leodido/go-urn ## explicit github.com/leodido/ragel-machinery github.com/leodido/ragel-machinery/parser +# github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 +## explicit; go 1.16 +github.com/lufia/plan9stats # github.com/mailru/easyjson v0.7.7 ## explicit; go 1.12 github.com/mailru/easyjson/buffer @@ -1298,6 +1305,9 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib +# github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c +## explicit; go 1.14 +github.com/power-devops/perfstat # github.com/prometheus/alertmanager v0.27.0 ## explicit; go 1.21 github.com/prometheus/alertmanager/api/v2/models @@ -1426,6 +1436,17 @@ github.com/segmentio/fasthash/fnv1a # github.com/sercand/kuberesolver/v5 v5.1.1 ## explicit; go 1.18 github.com/sercand/kuberesolver/v5 +# github.com/shirou/gopsutil/v4 v4.24.0-alpha.1 +## explicit; go 1.20 +github.com/shirou/gopsutil/v4/common +github.com/shirou/gopsutil/v4/cpu +github.com/shirou/gopsutil/v4/internal/common +github.com/shirou/gopsutil/v4/mem +github.com/shirou/gopsutil/v4/net +github.com/shirou/gopsutil/v4/process +# github.com/shoenig/go-m1cpu v0.1.6 +## explicit; go 1.20 +github.com/shoenig/go-m1cpu # github.com/shopspring/decimal v1.2.0 ## explicit; go 1.13 github.com/shopspring/decimal @@ -1479,6 +1500,12 @@ github.com/thanos-io/objstore/providers/gcs github.com/thanos-io/objstore/providers/s3 github.com/thanos-io/objstore/providers/swift github.com/thanos-io/objstore/tracing/opentracing +# github.com/tklauser/go-sysconf v0.3.12 +## explicit; go 1.13 +github.com/tklauser/go-sysconf +# github.com/tklauser/numcpus v0.6.1 +## explicit; go 1.13 +github.com/tklauser/numcpus # github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 ## explicit github.com/tonistiigi/fifo @@ -1530,6 +1557,9 @@ github.com/yuin/gopher-lua github.com/yuin/gopher-lua/ast github.com/yuin/gopher-lua/parse github.com/yuin/gopher-lua/pm +# github.com/yusufpapurcu/wmi v1.2.4 +## explicit; go 1.16 +github.com/yusufpapurcu/wmi # go.etcd.io/bbolt v1.3.6 ## explicit; go 1.12 go.etcd.io/bbolt From 88c671162f70e075f6aa43599aa560fe7b4b5627 Mon Sep 17 00:00:00 2001 From: Karsten Jeschkies Date: Thu, 16 May 2024 17:37:35 +0200 Subject: [PATCH 34/47] fix: Track bytes discarded by ingester. 
(#12981) **What this PR does / why we need it**: Only the distributor was tracking discarded bytes. The ingester was missing the tracker and calls. **Checklist** - [ ] Reviewed the [`CONTRIBUTING.md`](https://github.com/grafana/loki/blob/main/CONTRIBUTING.md) guide (**required**) - [ ] Documentation added - [x] Tests updated - [x] Title matches the required conventional commits format, see [here](https://www.conventionalcommits.org/en/v1.0.0/) - **Note** that Promtail is considered to be feature complete, and future development for logs collection will be in [Grafana Alloy](https://github.com/grafana/alloy). As such, `feat` PRs are unlikely to be accepted unless a case can be made for the feature actually being a bug fix to existing behavior. - [ ] Changes that require user attention or interaction to upgrade are documented in `docs/sources/setup/upgrade/_index.md` - [ ] For Helm chart changes bump the Helm chart version in `production/helm/loki/Chart.yaml` and update `production/helm/loki/CHANGELOG.md` and `production/helm/loki/README.md`. [Example PR](https://github.com/grafana/loki/commit/d10549e3ece02120974929894ee333d07755d213) - [ ] If the change is deprecating or removing a configuration option, update the `deprecated-config.yaml` and `deleted-config.yaml` files respectively in the `tools/deprecated-config-checker` directory. [Example PR](https://github.com/grafana/loki/pull/10840/commits/0d4416a4b03739583349934b96f272fb4f685d15) --- pkg/ingester/checkpoint_test.go | 26 ++++++++++++------------ pkg/ingester/flush_test.go | 2 +- pkg/ingester/ingester.go | 8 ++++++-- pkg/ingester/ingester_test.go | 24 +++++++++++----------- pkg/ingester/instance.go | 5 ++++- pkg/ingester/instance_test.go | 34 +++++++++++++++++++++++--------- pkg/ingester/recovery.go | 2 +- pkg/ingester/recovery_test.go | 4 ++-- pkg/ingester/stream.go | 24 +++++++++++++++------- pkg/ingester/stream_test.go | 35 ++++++++++++++++++++------------- pkg/ingester/tailer.go | 2 +- pkg/loki/modules.go | 2 +- 12 files changed, 104 insertions(+), 64 deletions(-) diff --git a/pkg/ingester/checkpoint_test.go b/pkg/ingester/checkpoint_test.go index e8871e7a13918..d530d937d42fe 100644 --- a/pkg/ingester/checkpoint_test.go +++ b/pkg/ingester/checkpoint_test.go @@ -70,7 +70,7 @@ func TestIngesterWAL(t *testing.T) { } } - i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) require.NoError(t, err) require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -113,7 +113,7 @@ func TestIngesterWAL(t *testing.T) { expectCheckpoint(t, walDir, false, time.Second) // restart the ingester - i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger()) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -127,7 +127,7 @@ func TestIngesterWAL(t *testing.T) { require.Nil(t, 
services.StopAndAwaitTerminated(context.Background(), i)) // restart the ingester - i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger()) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -150,7 +150,7 @@ func TestIngesterWALIgnoresStreamLimits(t *testing.T) { } } - i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) require.NoError(t, err) require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -196,7 +196,7 @@ func TestIngesterWALIgnoresStreamLimits(t *testing.T) { require.NoError(t, err) // restart the ingester - i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger()) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -253,7 +253,7 @@ func TestIngesterWALBackpressureSegments(t *testing.T) { } } - i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) require.NoError(t, err) require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -274,7 +274,7 @@ func TestIngesterWALBackpressureSegments(t *testing.T) { expectCheckpoint(t, walDir, false, time.Second) // restart the ingester, ensuring we replayed from WAL. 
- i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger()) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -295,7 +295,7 @@ func TestIngesterWALBackpressureCheckpoint(t *testing.T) { } } - i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) require.NoError(t, err) require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -316,7 +316,7 @@ func TestIngesterWALBackpressureCheckpoint(t *testing.T) { require.Nil(t, services.StopAndAwaitTerminated(context.Background(), i)) // restart the ingester, ensuring we can replay from the checkpoint as well. - i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger()) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) @@ -452,7 +452,7 @@ func Test_SeriesIterator(t *testing.T) { limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) for i := 0; i < 3; i++ { - inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("%d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil) + inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("%d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil, nil) require.Nil(t, err) require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream1}})) require.NoError(t, inst.Push(context.Background(), &logproto.PushRequest{Streams: []logproto.Stream{stream2}})) @@ -499,7 +499,7 @@ func Benchmark_SeriesIterator(b *testing.B) { limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) for i := range instances { - inst, _ := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("instance %d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil) + inst, _ := newInstance(defaultConfig(), defaultPeriodConfigs, fmt.Sprintf("instance %d", i), limiter, runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, nil, nil, nil, nil, NewStreamRateCalculator(), nil, nil) require.NoError(b, inst.Push(context.Background(), &logproto.PushRequest{ @@ -591,7 +591,7 @@ func TestIngesterWALReplaysUnorderedToOrdered(t *testing.T) { } } - i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, 
writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) require.NoError(t, err) require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -663,7 +663,7 @@ func TestIngesterWALReplaysUnorderedToOrdered(t *testing.T) { require.NoError(t, err) // restart the ingester - i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger()) + i, err = New(ingesterConfig, client.Config{}, newStore(), limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokit_log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck require.Nil(t, services.StartAndAwaitRunning(context.Background(), i)) diff --git a/pkg/ingester/flush_test.go b/pkg/ingester/flush_test.go index e4fc748f2560d..6fd52bafa066f 100644 --- a/pkg/ingester/flush_test.go +++ b/pkg/ingester/flush_test.go @@ -278,7 +278,7 @@ func newTestStore(t require.TestingT, cfg Config, walOverride WAL) (*testStore, limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - ing, err := New(cfg, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokitlog.NewNopLogger()) + ing, err := New(cfg, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, gokitlog.NewNopLogger(), nil) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), ing)) diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 640c64eee6b63..6d27d349c93f4 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -12,6 +12,7 @@ import ( "sync" "time" + "github.com/grafana/loki/v3/pkg/loghttp/push" "github.com/grafana/loki/v3/pkg/logqlmodel/metadata" "github.com/grafana/loki/v3/pkg/storage/types" @@ -242,10 +243,12 @@ type Ingester struct { streamRateCalculator *StreamRateCalculator writeLogManager *writefailures.Manager + + customStreamsTracker push.UsageTracker } // New makes a new Ingester. 
-func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, logger log.Logger) (*Ingester, error) { +func New(cfg Config, clientConfig client.Config, store Store, limits Limits, configs *runtime.TenantConfigs, registerer prometheus.Registerer, writeFailuresCfg writefailures.Cfg, metricsNamespace string, logger log.Logger, customStreamsTracker push.UsageTracker) (*Ingester, error) { if cfg.ingesterClientFactory == nil { cfg.ingesterClientFactory = client.New } @@ -273,6 +276,7 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con terminateOnShutdown: false, streamRateCalculator: NewStreamRateCalculator(), writeLogManager: writefailures.NewManager(logger, registerer, writeFailuresCfg, configs, "ingester"), + customStreamsTracker: customStreamsTracker, } i.replayController = newReplayController(metrics, cfg.WAL, &replayFlusher{i}) @@ -863,7 +867,7 @@ func (i *Ingester) GetOrCreateInstance(instanceID string) (*instance, error) { / inst, ok = i.instances[instanceID] if !ok { var err error - inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.wal, i.metrics, i.flushOnShutdownSwitch, i.chunkFilter, i.pipelineWrapper, i.extractorWrapper, i.streamRateCalculator, i.writeLogManager) + inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.wal, i.metrics, i.flushOnShutdownSwitch, i.chunkFilter, i.pipelineWrapper, i.extractorWrapper, i.streamRateCalculator, i.writeLogManager, i.customStreamsTracker) if err != nil { return nil, err } diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index b31053a5ded17..035a62e5a6414 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -57,7 +57,7 @@ func TestPrepareShutdownMarkerPathNotSet(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -80,7 +80,7 @@ func TestPrepareShutdown(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -141,7 +141,7 @@ func TestIngester_GetStreamRates_Correctness(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -173,7 +173,7 @@ func BenchmarkGetStreamRatesAllocs(b 
*testing.B) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(b, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -197,7 +197,7 @@ func TestIngester(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -382,7 +382,7 @@ func TestIngesterStreamLimitExceeded(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, overrides, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, store, overrides, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -740,7 +740,7 @@ func Test_InMemoryLabels(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -794,7 +794,7 @@ func TestIngester_GetDetectedLabels(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -857,7 +857,7 @@ func TestIngester_GetDetectedLabelsWithQuery(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, store, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck @@ -1224,7 +1224,7 @@ func TestStats(t *testing.T) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, 
runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) i.instances["test"] = defaultInstance(t) @@ -1251,7 +1251,7 @@ func TestVolume(t *testing.T) { limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) i.instances["test"] = defaultInstance(t) @@ -1330,7 +1330,7 @@ func createIngesterServer(t *testing.T, ingesterConfig Config) (ingesterClient, limits, err := validation.NewOverrides(defaultLimitsTestConfig(), nil) require.NoError(t, err) - ing, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + ing, err := New(ingesterConfig, client.Config{}, &mockStore{}, limits, runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) listener := bufconn.Listen(1024 * 1024) diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go index eb98f8a39b630..a4436b9d41915 100644 --- a/pkg/ingester/instance.go +++ b/pkg/ingester/instance.go @@ -141,6 +141,7 @@ func newInstance( extractorWrapper log.SampleExtractorWrapper, streamRateCalculator *StreamRateCalculator, writeFailures *writefailures.Manager, + customStreamsTracker push.UsageTracker, ) (*instance, error) { invertedIndex, err := index.NewMultiInvertedIndex(periodConfigs, uint32(cfg.IndexShards)) if err != nil { @@ -174,6 +175,8 @@ func newInstance( writeFailures: writeFailures, schemaconfig: &c, + + customStreamsTracker: customStreamsTracker, } i.mapper = NewFPMapper(i.getLabelsFromFingerprint) return i, err @@ -241,7 +244,7 @@ func (i *instance) Push(ctx context.Context, req *logproto.PushRequest) error { continue } - _, appendErr = s.Push(ctx, reqStream.Entries, record, 0, false, rateLimitWholeStream) + _, appendErr = s.Push(ctx, reqStream.Entries, record, 0, false, rateLimitWholeStream, i.customStreamsTracker) s.chunkMtx.Unlock() } diff --git a/pkg/ingester/instance_test.go b/pkg/ingester/instance_test.go index 88b613aa8db2d..7f7dc30361d6a 100644 --- a/pkg/ingester/instance_test.go +++ b/pkg/ingester/instance_test.go @@ -73,7 +73,7 @@ func TestLabelsCollisions(t *testing.T) { require.NoError(t, err) limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) - i, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil) + i, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) require.Nil(t, err) // avoid entries from the future. 
@@ -101,7 +101,7 @@ func TestConcurrentPushes(t *testing.T) { require.NoError(t, err) limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) - inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil) + inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) require.Nil(t, err) const ( @@ -153,7 +153,7 @@ func TestGetStreamRates(t *testing.T) { require.NoError(t, err) limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) - inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil) + inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) require.NoError(t, err) const ( @@ -247,7 +247,7 @@ func TestSyncPeriod(t *testing.T) { minUtil = 0.20 ) - inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil) + inst, err := newInstance(defaultConfig(), defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) require.Nil(t, err) lbls := makeRandomLabels() @@ -292,7 +292,7 @@ func setupTestStreams(t *testing.T) (*instance, time.Time, int) { cfg.SyncMinUtilization = 0.20 cfg.IndexShards = indexShards - instance, err := newInstance(cfg, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil) + instance, err := newInstance(cfg, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) require.Nil(t, err) currentTime := time.Now() @@ -501,7 +501,7 @@ func Benchmark_PushInstance(b *testing.B) { require.NoError(b, err) limiter := NewLimiter(limits, NilMetrics, &ringCountMock{count: 1}, 1) - i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil) + i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) ctx := context.Background() for n := 0; n < b.N; n++ { @@ -545,7 +545,7 @@ func Benchmark_instance_addNewTailer(b *testing.B) { ctx := context.Background() - inst, _ := newInstance(&Config{}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil) + inst, _ := newInstance(&Config{}, defaultPeriodConfigs, "test", limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) expr, err := syntax.ParseLogSelector(`{namespace="foo",pod="bar",instance=~"10.*"}`, true) 
require.NoError(b, err) t, err := newTailer("foo", expr, nil, 10) @@ -1095,7 +1095,8 @@ func TestStreamShardingUsage(t *testing.T) { }) t.Run("invalid push returns error", func(t *testing.T) { - i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil) + tracker := &mockUsageTracker{} + i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant1, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, tracker) ctx := context.Background() err = i.Push(ctx, &logproto.PushRequest{ @@ -1111,10 +1112,11 @@ func TestStreamShardingUsage(t *testing.T) { }, }) require.Error(t, err) + require.Equal(t, 3.0, tracker.discardedBytes) }) t.Run("valid push returns no error", func(t *testing.T) { - i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil) + i, _ := newInstance(&Config{IndexShards: 1}, defaultPeriodConfigs, customTenant2, limiter, loki_runtime.DefaultTenantConfigs(), noopWAL{}, NilMetrics, &OnceSwitch{}, nil, nil, nil, NewStreamRateCalculator(), nil, nil) ctx := context.Background() err = i.Push(ctx, &logproto.PushRequest{ @@ -1449,6 +1451,7 @@ func defaultInstance(t *testing.T) *instance { nil, NewStreamRateCalculator(), nil, + nil, ) require.Nil(t, err) insertData(t, instance) @@ -1535,3 +1538,16 @@ func (f fakeQueryServer) Send(res *logproto.QueryResponse) error { return f(res) } func (f fakeQueryServer) Context() context.Context { return context.TODO() } + +type mockUsageTracker struct { + discardedBytes float64 +} + +// DiscardedBytesAdd implements push.UsageTracker. +func (m *mockUsageTracker) DiscardedBytesAdd(_ context.Context, _ string, _ string, _ labels.Labels, value float64) { + m.discardedBytes += value +} + +// ReceivedBytesAdd implements push.UsageTracker. 
+func (*mockUsageTracker) ReceivedBytesAdd(_ context.Context, _ string, _ time.Duration, _ labels.Labels, _ float64) { +} diff --git a/pkg/ingester/recovery.go b/pkg/ingester/recovery.go index a93151e0e6fca..e8b1c244871bb 100644 --- a/pkg/ingester/recovery.go +++ b/pkg/ingester/recovery.go @@ -168,7 +168,7 @@ func (r *ingesterRecoverer) Push(userID string, entries wal.RefEntries) error { } // ignore out of order errors here (it's possible for a checkpoint to already have data from the wal segments) - bytesAdded, err := s.(*stream).Push(context.Background(), entries.Entries, nil, entries.Counter, true, false) + bytesAdded, err := s.(*stream).Push(context.Background(), entries.Entries, nil, entries.Counter, true, false, r.ing.customStreamsTracker) r.ing.replayController.Add(int64(bytesAdded)) if err != nil && err == ErrEntriesExist { r.ing.metrics.duplicateEntriesTotal.Add(float64(len(entries.Entries))) diff --git a/pkg/ingester/recovery_test.go b/pkg/ingester/recovery_test.go index fd8f05136d6f5..9176ff3c6ad2f 100644 --- a/pkg/ingester/recovery_test.go +++ b/pkg/ingester/recovery_test.go @@ -228,7 +228,7 @@ func TestSeriesRecoveryNoDuplicates(t *testing.T) { chunks: map[string][]chunk.Chunk{}, } - i, err := New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err := New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) mkSample := func(i int) *logproto.PushRequest { @@ -262,7 +262,7 @@ func TestSeriesRecoveryNoDuplicates(t *testing.T) { require.Equal(t, false, iter.Next()) // create a new ingester now - i, err = New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger()) + i, err = New(ingesterConfig, client.Config{}, store, limits, loki_runtime.DefaultTenantConfigs(), nil, writefailures.Cfg{}, constants.Loki, log.NewNopLogger(), nil) require.NoError(t, err) // recover the checkpointed series diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go index 6bf75dfa1ac54..0aa3c41ea619b 100644 --- a/pkg/ingester/stream.go +++ b/pkg/ingester/stream.go @@ -19,6 +19,7 @@ import ( "github.com/grafana/loki/v3/pkg/distributor/writefailures" "github.com/grafana/loki/v3/pkg/ingester/wal" "github.com/grafana/loki/v3/pkg/iter" + "github.com/grafana/loki/v3/pkg/loghttp/push" "github.com/grafana/loki/v3/pkg/logproto" "github.com/grafana/loki/v3/pkg/logql/log" "github.com/grafana/loki/v3/pkg/logqlmodel/stats" @@ -181,6 +182,8 @@ func (s *stream) Push( lockChunk bool, // Whether nor not to ingest all at once or not. It is a per-tenant configuration. 
rateLimitWholeStream bool, + + usageTracker push.UsageTracker, ) (int, error) { if lockChunk { s.chunkMtx.Lock() @@ -199,7 +202,7 @@ func (s *stream) Push( return 0, ErrEntriesExist } - toStore, invalid := s.validateEntries(entries, isReplay, rateLimitWholeStream) + toStore, invalid := s.validateEntries(ctx, entries, isReplay, rateLimitWholeStream, usageTracker) if rateLimitWholeStream && hasRateLimitErr(invalid) { return 0, errorForFailedEntries(s, invalid, len(entries)) } @@ -213,7 +216,7 @@ func (s *stream) Push( s.metrics.chunkCreatedStats.Inc(1) } - bytesAdded, storedEntries, entriesWithErr := s.storeEntries(ctx, toStore) + bytesAdded, storedEntries, entriesWithErr := s.storeEntries(ctx, toStore, usageTracker) s.recordAndSendToTailers(record, storedEntries) if len(s.chunks) != prevNumChunks { @@ -313,7 +316,7 @@ func (s *stream) recordAndSendToTailers(record *wal.Record, entries []logproto.E } } -func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry) (int, []logproto.Entry, []entryWithError) { +func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry, usageTracker push.UsageTracker) (int, []logproto.Entry, []entryWithError) { if sp := opentracing.SpanFromContext(ctx); sp != nil { sp.LogKV("event", "stream started to store entries", "labels", s.labelsString) defer sp.LogKV("event", "stream finished to store entries") @@ -350,11 +353,12 @@ func (s *stream) storeEntries(ctx context.Context, entries []logproto.Entry) (in bytesAdded += len(entries[i].Line) storedEntries = append(storedEntries, entries[i]) } - s.reportMetrics(outOfOrderSamples, outOfOrderBytes, 0, 0) + s.reportMetrics(ctx, outOfOrderSamples, outOfOrderBytes, 0, 0, usageTracker) return bytesAdded, storedEntries, invalid } -func (s *stream) validateEntries(entries []logproto.Entry, isReplay, rateLimitWholeStream bool) ([]logproto.Entry, []entryWithError) { +func (s *stream) validateEntries(ctx context.Context, entries []logproto.Entry, isReplay, rateLimitWholeStream bool, usageTracker push.UsageTracker) ([]logproto.Entry, []entryWithError) { + var ( outOfOrderSamples, outOfOrderBytes int rateLimitedSamples, rateLimitedBytes int @@ -427,11 +431,11 @@ func (s *stream) validateEntries(entries []logproto.Entry, isReplay, rateLimitWh } s.streamRateCalculator.Record(s.tenant, s.labelHash, s.labelHashNoShard, totalBytes) - s.reportMetrics(outOfOrderSamples, outOfOrderBytes, rateLimitedSamples, rateLimitedBytes) + s.reportMetrics(ctx, outOfOrderSamples, outOfOrderBytes, rateLimitedSamples, rateLimitedBytes, usageTracker) return toStore, failedEntriesWithError } -func (s *stream) reportMetrics(outOfOrderSamples, outOfOrderBytes, rateLimitedSamples, rateLimitedBytes int) { +func (s *stream) reportMetrics(ctx context.Context, outOfOrderSamples, outOfOrderBytes, rateLimitedSamples, rateLimitedBytes int, usageTracker push.UsageTracker) { if outOfOrderSamples > 0 { name := validation.OutOfOrder if s.unorderedWrites { @@ -439,10 +443,16 @@ func (s *stream) reportMetrics(outOfOrderSamples, outOfOrderBytes, rateLimitedSa } validation.DiscardedSamples.WithLabelValues(name, s.tenant).Add(float64(outOfOrderSamples)) validation.DiscardedBytes.WithLabelValues(name, s.tenant).Add(float64(outOfOrderBytes)) + if usageTracker != nil { + usageTracker.DiscardedBytesAdd(ctx, s.tenant, name, s.labels, float64(outOfOrderBytes)) + } } if rateLimitedSamples > 0 { validation.DiscardedSamples.WithLabelValues(validation.StreamRateLimit, s.tenant).Add(float64(rateLimitedSamples)) 
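// usageTracker may be nil here (most tests, and some callers, pass nil), so each tracker call
// below is guarded before mirroring the Prometheus discard counters into the tracker.
// A hedged convenience wrapper — not part of this patch, names are illustrative — that would
// centralise that guard:
//
//	func trackDiscarded(ctx context.Context, tracker push.UsageTracker, tenant, reason string,
//		streamLabels labels.Labels, discardedBytes int) {
//		if tracker != nil && discardedBytes > 0 {
//			tracker.DiscardedBytesAdd(ctx, tenant, reason, streamLabels, float64(discardedBytes))
//		}
//	}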
validation.DiscardedBytes.WithLabelValues(validation.StreamRateLimit, s.tenant).Add(float64(rateLimitedBytes)) + if usageTracker != nil { + usageTracker.DiscardedBytesAdd(ctx, s.tenant, validation.StreamRateLimit, s.labels, float64(rateLimitedBytes)) + } } } diff --git a/pkg/ingester/stream_test.go b/pkg/ingester/stream_test.go index af877bf88da9e..e4dd4a37ab355 100644 --- a/pkg/ingester/stream_test.go +++ b/pkg/ingester/stream_test.go @@ -73,7 +73,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) { _, err := s.Push(context.Background(), []logproto.Entry{ {Timestamp: time.Unix(int64(numLogs), 0), Line: "log"}, - }, recordPool.GetRecord(), 0, true, false) + }, recordPool.GetRecord(), 0, true, false, nil) require.NoError(t, err) newLines := make([]logproto.Entry, numLogs) @@ -94,7 +94,7 @@ func TestMaxReturnedStreamsErrors(t *testing.T) { fmt.Fprintf(&expected, "user 'fake', total ignored: %d out of %d for stream: {foo=\"bar\"}", numLogs, numLogs) expectErr := httpgrpc.Errorf(http.StatusBadRequest, expected.String()) - _, err = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, true, false) + _, err = s.Push(context.Background(), newLines, recordPool.GetRecord(), 0, true, false, nil) require.Error(t, err) require.Equal(t, expectErr.Error(), err.Error()) }) @@ -128,7 +128,7 @@ func TestPushDeduplication(t *testing.T) { {Timestamp: time.Unix(1, 0), Line: "test"}, {Timestamp: time.Unix(1, 0), Line: "test"}, {Timestamp: time.Unix(1, 0), Line: "newer, better test"}, - }, recordPool.GetRecord(), 0, true, false) + }, recordPool.GetRecord(), 0, true, false, nil) require.NoError(t, err) require.Len(t, s.chunks, 1) require.Equal(t, s.chunks[0].chunk.Size(), 2, @@ -164,7 +164,7 @@ func TestPushRejectOldCounter(t *testing.T) { {Timestamp: time.Unix(1, 0), Line: "test"}, {Timestamp: time.Unix(1, 0), Line: "test"}, {Timestamp: time.Unix(1, 0), Line: "newer, better test"}, - }, recordPool.GetRecord(), 0, true, false) + }, recordPool.GetRecord(), 0, true, false, nil) require.NoError(t, err) require.Len(t, s.chunks, 1) require.Equal(t, s.chunks[0].chunk.Size(), 2, @@ -173,13 +173,13 @@ func TestPushRejectOldCounter(t *testing.T) { // fail to push with a counter <= the streams internal counter _, err = s.Push(context.Background(), []logproto.Entry{ {Timestamp: time.Unix(1, 0), Line: "test"}, - }, recordPool.GetRecord(), 2, true, false) + }, recordPool.GetRecord(), 2, true, false, nil) require.Equal(t, ErrEntriesExist, err) // succeed with a greater counter _, err = s.Push(context.Background(), []logproto.Entry{ {Timestamp: time.Unix(1, 0), Line: "test"}, - }, recordPool.GetRecord(), 3, true, false) + }, recordPool.GetRecord(), 3, true, false, nil) require.Nil(t, err) } @@ -270,9 +270,12 @@ func TestEntryErrorCorrectlyReported(t *testing.T) { {Line: "observability", Timestamp: time.Now().AddDate(-1 /* year */, 0 /* month */, 0 /* day */)}, {Line: "short", Timestamp: time.Now()}, } - _, failed := s.validateEntries(entries, false, true) + tracker := &mockUsageTracker{} + + _, failed := s.validateEntries(context.Background(), entries, false, true, tracker) require.NotEmpty(t, failed) require.False(t, hasRateLimitErr(failed)) + require.Equal(t, 13.0, tracker.discardedBytes) } func TestUnorderedPush(t *testing.T) { @@ -340,7 +343,7 @@ func TestUnorderedPush(t *testing.T) { if x.cutBefore { _ = s.cutChunk(context.Background()) } - written, err := s.Push(context.Background(), x.entries, recordPool.GetRecord(), 0, true, false) + written, err := s.Push(context.Background(), x.entries, 
recordPool.GetRecord(), 0, true, false, nil) if x.err { require.NotNil(t, err) } else { @@ -407,9 +410,11 @@ func TestPushRateLimit(t *testing.T) { {Timestamp: time.Unix(1, 0), Line: "aaaaaaaaab"}, } // Counter should be 2 now since the first line will be deduped. - _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true) + tracker := &mockUsageTracker{} + _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true, tracker) require.Error(t, err) require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[1].Line))}).Error()) + require.Equal(t, 20.0, tracker.discardedBytes) } func TestPushRateLimitAllOrNothing(t *testing.T) { @@ -446,10 +451,12 @@ func TestPushRateLimitAllOrNothing(t *testing.T) { } // Both entries have errors because rate limiting is done all at once - _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true) + tracker := &mockUsageTracker{} + _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, true, tracker) require.Error(t, err) require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[0].Line))}).Error()) require.Contains(t, err.Error(), (&validation.ErrStreamRateLimit{RateLimit: l.PerStreamRateLimit, Labels: s.labelsString, Bytes: flagext.ByteSize(len(entries[1].Line))}).Error()) + require.Equal(t, 20.0, tracker.discardedBytes) } func TestReplayAppendIgnoresValidityWindow(t *testing.T) { @@ -484,7 +491,7 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) { } // Push a first entry (it doesn't matter if we look like we're replaying or not) - _, err = s.Push(context.Background(), entries, nil, 1, true, false) + _, err = s.Push(context.Background(), entries, nil, 1, true, false, nil) require.Nil(t, err) // Create a sample outside the validity window @@ -493,11 +500,11 @@ func TestReplayAppendIgnoresValidityWindow(t *testing.T) { } // Pretend it's not a replay, ensure we error - _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, false) + _, err = s.Push(context.Background(), entries, recordPool.GetRecord(), 0, true, false, nil) require.NotNil(t, err) // Now pretend it's a replay. The same write should succeed. 
- _, err = s.Push(context.Background(), entries, nil, 2, true, false) + _, err = s.Push(context.Background(), entries, nil, 2, true, false, nil) require.Nil(t, err) } @@ -542,7 +549,7 @@ func Benchmark_PushStream(b *testing.B) { for n := 0; n < b.N; n++ { rec := recordPool.GetRecord() - _, err := s.Push(ctx, e, rec, 0, true, false) + _, err := s.Push(ctx, e, rec, 0, true, false, nil) require.NoError(b, err) recordPool.PutRecord(rec) } diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go index 441c688612d9e..b39f42957360b 100644 --- a/pkg/ingester/tailer.go +++ b/pkg/ingester/tailer.go @@ -4,11 +4,11 @@ import ( "encoding/binary" "hash/fnv" "sync" - "sync/atomic" "time" "github.com/go-kit/log/level" "github.com/prometheus/prometheus/model/labels" + "go.uber.org/atomic" "golang.org/x/net/context" "github.com/grafana/loki/v3/pkg/logproto" diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 3561f89a23187..0280bd514d3c1 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -583,7 +583,7 @@ func (t *Loki) initIngester() (_ services.Service, err error) { level.Warn(util_log.Logger).Log("msg", "The config setting shutdown marker path is not set. The /ingester/prepare_shutdown endpoint won't work") } - t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, t.Overrides, t.tenantConfigs, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger) + t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Store, t.Overrides, t.tenantConfigs, prometheus.DefaultRegisterer, t.Cfg.Distributor.WriteFailuresLogging, t.Cfg.MetricsNamespace, logger, t.UsageTracker) if err != nil { return } From 88e545fc952d6ff55c61d079db920f00abc04865 Mon Sep 17 00:00:00 2001 From: Kaviraj Kanagaraj Date: Sun, 19 May 2024 17:40:25 +0200 Subject: [PATCH 35/47] feat(cache): Add `Cache-Control: no-cache` support for Loki instant queries. (#12896) Signed-off-by: Kaviraj --- cmd/logcli/main.go | 1 + pkg/chunkenc/interface.go | 2 +- pkg/logcli/client/client.go | 15 +- pkg/logql/evaluator.go | 76 +++- pkg/querier/queryrange/codec.go | 48 +- pkg/querier/queryrange/codec_test.go | 49 +++ pkg/querier/queryrange/downstreamer.go | 6 +- pkg/querier/queryrange/downstreamer_test.go | 80 ++++ pkg/querier/queryrange/queryrange.pb.go | 414 +++++++++++------- pkg/querier/queryrange/queryrange.proto | 3 + .../chunk/cache/resultscache/cache_test.go | 99 +++++ 11 files changed, 626 insertions(+), 167 deletions(-) diff --git a/cmd/logcli/main.go b/cmd/logcli/main.go index e4e04da8d6657..08fbcc195ba64 100644 --- a/cmd/logcli/main.go +++ b/cmd/logcli/main.go @@ -475,6 +475,7 @@ func newQueryClient(app *kingpin.Application) client.Client { app.Flag("key", "Path to the client certificate key. Can also be set using LOKI_CLIENT_KEY_PATH env var.").Default("").Envar("LOKI_CLIENT_KEY_PATH").StringVar(&client.TLSConfig.KeyFile) app.Flag("org-id", "adds X-Scope-OrgID to API requests for representing tenant ID. Useful for requesting tenant data when bypassing an auth gateway. Can also be set using LOKI_ORG_ID env var.").Default("").Envar("LOKI_ORG_ID").StringVar(&client.OrgID) app.Flag("query-tags", "adds X-Query-Tags http header to API requests. This header value will be part of `metrics.go` statistics. Useful for tracking the query. Can also be set using LOKI_QUERY_TAGS env var.").Default("").Envar("LOKI_QUERY_TAGS").StringVar(&client.QueryTags) + app.Flag("nocache", "adds Cache-Control: no-cache http header to API requests. 
Can also be set using LOKI_NO_CACHE env var.").Default("false").Envar("LOKI_NO_CACHE").BoolVar(&client.NoCache) app.Flag("bearer-token", "adds the Authorization header to API requests for authentication purposes. Can also be set using LOKI_BEARER_TOKEN env var.").Default("").Envar("LOKI_BEARER_TOKEN").StringVar(&client.BearerToken) app.Flag("bearer-token-file", "adds the Authorization header to API requests for authentication purposes. Can also be set using LOKI_BEARER_TOKEN_FILE env var.").Default("").Envar("LOKI_BEARER_TOKEN_FILE").StringVar(&client.BearerTokenFile) app.Flag("retries", "How many times to retry each query when getting an error response from Loki. Can also be set using LOKI_CLIENT_RETRIES env var.").Default("0").Envar("LOKI_CLIENT_RETRIES").IntVar(&client.Retries) diff --git a/pkg/chunkenc/interface.go b/pkg/chunkenc/interface.go index 8d6f5e1e8dd60..3825a6520af5f 100644 --- a/pkg/chunkenc/interface.go +++ b/pkg/chunkenc/interface.go @@ -24,7 +24,7 @@ var ( ) type errTooFarBehind struct { - // original timestmap of the entry itself. + // original timestamp of the entry itself. entryTs time.Time // cutoff is the oldest acceptable timstamp of the `stream` that entry belongs to. diff --git a/pkg/logcli/client/client.go b/pkg/logcli/client/client.go index e417ccfa3ce52..0c7880d62257c 100644 --- a/pkg/logcli/client/client.go +++ b/pkg/logcli/client/client.go @@ -39,6 +39,12 @@ const ( volumeRangePath = "/loki/api/v1/index/volume_range" detectedFieldsPath = "/loki/api/v1/detected_fields" defaultAuthHeader = "Authorization" + + // HTTP header keys + HTTPScopeOrgID = "X-Scope-OrgID" + HTTPQueryTags = "X-Query-Tags" + HTTPCacheControl = "Cache-Control" + HTTPCacheControlNoCache = "no-cache" ) var userAgent = fmt.Sprintf("loki-logcli/%s", build.Version) @@ -77,6 +83,7 @@ type DefaultClient struct { BearerTokenFile string Retries int QueryTags string + NoCache bool AuthHeader string ProxyURL string BackoffConfig BackoffConfig @@ -372,11 +379,15 @@ func (c *DefaultClient) getHTTPRequestHeader() (http.Header, error) { h.Set("User-Agent", userAgent) if c.OrgID != "" { - h.Set("X-Scope-OrgID", c.OrgID) + h.Set(HTTPScopeOrgID, c.OrgID) + } + + if c.NoCache { + h.Set(HTTPCacheControl, HTTPCacheControlNoCache) } if c.QueryTags != "" { - h.Set("X-Query-Tags", c.QueryTags) + h.Set(HTTPQueryTags, c.QueryTags) } if (c.Username != "" || c.Password != "") && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) { diff --git a/pkg/logql/evaluator.go b/pkg/logql/evaluator.go index ff887ff9b7529..e50d8739c30ad 100644 --- a/pkg/logql/evaluator.go +++ b/pkg/logql/evaluator.go @@ -18,6 +18,7 @@ import ( "github.com/grafana/loki/v3/pkg/logql/syntax" "github.com/grafana/loki/v3/pkg/logqlmodel" "github.com/grafana/loki/v3/pkg/querier/plan" + "github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/v3/pkg/util" ) @@ -42,6 +43,7 @@ type Params interface { Shards() []string GetExpression() syntax.Expr GetStoreChunks() *logproto.ChunkRefGroup + CachingOptions() resultscache.CachingOptions } func NewLiteralParams( @@ -52,22 +54,70 @@ func NewLiteralParams( limit uint32, shards []string, storeChunks *logproto.ChunkRefGroup, +) (LiteralParams, error) { + return newLiteralParams( + qs, + start, + end, + step, + interval, + direction, + limit, + shards, + storeChunks, + resultscache.CachingOptions{}, + ) +} + +func NewLiteralParamsWithCaching( + qs string, + start, end time.Time, + step, interval time.Duration, + direction logproto.Direction, + limit uint32, + shards []string, + 
storeChunks *logproto.ChunkRefGroup, + cachingOptions resultscache.CachingOptions, +) (LiteralParams, error) { + return newLiteralParams( + qs, + start, + end, + step, + interval, + direction, + limit, + shards, + storeChunks, + cachingOptions, + ) +} + +func newLiteralParams( + qs string, + start, end time.Time, + step, interval time.Duration, + direction logproto.Direction, + limit uint32, + shards []string, + storeChunks *logproto.ChunkRefGroup, + cachingOptions resultscache.CachingOptions, ) (LiteralParams, error) { p := LiteralParams{ - queryString: qs, - start: start, - end: end, - step: step, - interval: interval, - direction: direction, - limit: limit, - shards: shards, - storeChunks: storeChunks, + queryString: qs, + start: start, + end: end, + step: step, + interval: interval, + direction: direction, + limit: limit, + shards: shards, + storeChunks: storeChunks, + cachingOptions: cachingOptions, } var err error p.queryExpr, err = syntax.ParseExpr(qs) return p, err - } // LiteralParams impls Params @@ -80,6 +130,7 @@ type LiteralParams struct { shards []string queryExpr syntax.Expr storeChunks *logproto.ChunkRefGroup + cachingOptions resultscache.CachingOptions } func (p LiteralParams) Copy() LiteralParams { return p } @@ -114,6 +165,11 @@ func (p LiteralParams) Shards() []string { return p.shards } // StoreChunks impls Params func (p LiteralParams) GetStoreChunks() *logproto.ChunkRefGroup { return p.storeChunks } +// CachingOptions returns whether Loki query created from this params should be cached. +func (p LiteralParams) CachingOptions() resultscache.CachingOptions { + return p.cachingOptions +} + // GetRangeType returns whether a query is an instant query or range query func GetRangeType(q Params) QueryRangeType { if q.Start() == q.End() && q.Step() == 0 { diff --git a/pkg/querier/queryrange/codec.go b/pkg/querier/queryrange/codec.go index 01ff8772a4c75..7bab6f6c5d054 100644 --- a/pkg/querier/queryrange/codec.go +++ b/pkg/querier/queryrange/codec.go @@ -42,6 +42,11 @@ import ( "github.com/grafana/loki/v3/pkg/util/querylimits" ) +const ( + cacheControlHeader = "Cache-Control" + noCacheVal = "no-cache" +) + var DefaultCodec = &Codec{} type Codec struct{} @@ -95,8 +100,6 @@ func (r *LokiRequest) LogToSpan(sp opentracing.Span) { ) } -func (*LokiRequest) GetCachingOptions() (res queryrangebase.CachingOptions) { return } - func (r *LokiInstantRequest) GetStep() int64 { return 0 } @@ -142,8 +145,6 @@ func (r *LokiInstantRequest) LogToSpan(sp opentracing.Span) { ) } -func (*LokiInstantRequest) GetCachingOptions() (res queryrangebase.CachingOptions) { return } - func (r *LokiSeriesRequest) GetEnd() time.Time { return r.EndTs } @@ -329,13 +330,18 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } + disableCacheReq := false + + if strings.ToLower(strings.TrimSpace(r.Header.Get(cacheControlHeader))) == noCacheVal { + disableCacheReq = true + } + switch op := getOperation(r.URL.Path); op { case QueryRangeOp: req, err := parseRangeQuery(r) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } - return req, nil case InstantQueryOp: req, err := parseInstantQuery(r) @@ -343,6 +349,10 @@ func (Codec) DecodeRequest(_ context.Context, r *http.Request, _ []string) (quer return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } + req.CachingOptions = queryrangebase.CachingOptions{ + Disabled: disableCacheReq, + } + return req, nil case SeriesOp: req, err := 
loghttp.ParseAndValidateSeriesQuery(r) @@ -1808,6 +1818,10 @@ func (p paramsRangeWrapper) Shards() []string { return p.GetShards() } +func (p paramsRangeWrapper) CachingOptions() resultscache.CachingOptions { + return resultscache.CachingOptions{} +} + type paramsInstantWrapper struct { *LokiInstantRequest } @@ -1840,6 +1854,10 @@ func (p paramsInstantWrapper) Shards() []string { return p.GetShards() } +func (p paramsInstantWrapper) CachingOptions() resultscache.CachingOptions { + return p.LokiInstantRequest.CachingOptions +} + type paramsSeriesWrapper struct { *LokiSeriesRequest } @@ -1876,6 +1894,10 @@ func (p paramsSeriesWrapper) GetStoreChunks() *logproto.ChunkRefGroup { return nil } +func (p paramsSeriesWrapper) CachingOptions() resultscache.CachingOptions { + return resultscache.CachingOptions{} +} + type paramsLabelWrapper struct { *LabelRequest } @@ -1912,6 +1934,10 @@ func (p paramsLabelWrapper) GetStoreChunks() *logproto.ChunkRefGroup { return nil } +func (p paramsLabelWrapper) CachingOptions() resultscache.CachingOptions { + return resultscache.CachingOptions{} +} + type paramsStatsWrapper struct { *logproto.IndexStatsRequest } @@ -1948,6 +1974,10 @@ func (p paramsStatsWrapper) GetStoreChunks() *logproto.ChunkRefGroup { return nil } +func (p paramsStatsWrapper) CachingOptions() resultscache.CachingOptions { + return resultscache.CachingOptions{} +} + type paramsDetectedFieldsWrapper struct { *DetectedFieldsRequest } @@ -2040,6 +2070,14 @@ func (p paramsDetectedFieldsWrapper) GetStoreChunks() *logproto.ChunkRefGroup { return nil } +func (p paramsDetectedLabelsWrapper) CachingOptions() resultscache.CachingOptions { + return resultscache.CachingOptions{} +} + +func (p paramsDetectedFieldsWrapper) CachingOptions() resultscache.CachingOptions { + return resultscache.CachingOptions{} +} + func httpResponseHeadersToPromResponseHeaders(httpHeaders http.Header) []queryrangebase.PrometheusResponseHeader { var promHeaders []queryrangebase.PrometheusResponseHeader for h, hv := range httpHeaders { diff --git a/pkg/querier/queryrange/codec_test.go b/pkg/querier/queryrange/codec_test.go index 2a67ac512bce2..1363792922382 100644 --- a/pkg/querier/queryrange/codec_test.go +++ b/pkg/querier/queryrange/codec_test.go @@ -219,6 +219,55 @@ func Test_codec_EncodeDecodeRequest(t *testing.T) { } } +func Test_codec_DecodeRequest_cacheHeader(t *testing.T) { + ctx := user.InjectOrgID(context.Background(), "1") + + tests := []struct { + name string + reqBuilder func() (*http.Request, error) + want queryrangebase.Request + }{ + { + "query_instant", + func() (*http.Request, error) { + req, err := http.NewRequest( + http.MethodGet, + fmt.Sprintf(`/v1/query?time=%d&query={foo="bar"}&limit=200&direction=FORWARD`, start.UnixNano()), + nil, + ) + if err == nil { + req.Header.Set(cacheControlHeader, noCacheVal) + } + return req, err + }, + &LokiInstantRequest{ + Query: `{foo="bar"}`, + Limit: 200, + Direction: logproto.FORWARD, + Path: "/v1/query", + TimeTs: start, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(`{foo="bar"}`), + }, + CachingOptions: queryrangebase.CachingOptions{ + Disabled: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req, err := tt.reqBuilder() + if err != nil { + t.Fatal(err) + } + got, err := DefaultCodec.DecodeRequest(ctx, req, nil) + require.NoError(t, err) + require.Equal(t, tt.want, got) + }) + } +} + func Test_codec_DecodeResponse(t *testing.T) { tests := []struct { name string diff --git a/pkg/querier/queryrange/downstreamer.go 
b/pkg/querier/queryrange/downstreamer.go index cf1cfc36dc4ac..9f946a3247e98 100644 --- a/pkg/querier/queryrange/downstreamer.go +++ b/pkg/querier/queryrange/downstreamer.go @@ -45,7 +45,8 @@ func ParamsToLokiRequest(params logql.Params) queryrangebase.Request { Plan: &plan.QueryPlan{ AST: params.GetExpression(), }, - StoreChunks: params.GetStoreChunks(), + StoreChunks: params.GetStoreChunks(), + CachingOptions: params.CachingOptions(), } } return &LokiRequest{ @@ -61,7 +62,8 @@ func ParamsToLokiRequest(params logql.Params) queryrangebase.Request { Plan: &plan.QueryPlan{ AST: params.GetExpression(), }, - StoreChunks: params.GetStoreChunks(), + StoreChunks: params.GetStoreChunks(), + CachingOptions: params.CachingOptions(), } } diff --git a/pkg/querier/queryrange/downstreamer_test.go b/pkg/querier/queryrange/downstreamer_test.go index 979cc5a04e002..a10913f223736 100644 --- a/pkg/querier/queryrange/downstreamer_test.go +++ b/pkg/querier/queryrange/downstreamer_test.go @@ -22,7 +22,9 @@ import ( "github.com/grafana/loki/v3/pkg/logql/syntax" "github.com/grafana/loki/v3/pkg/logqlmodel" "github.com/grafana/loki/v3/pkg/logqlmodel/stats" + "github.com/grafana/loki/v3/pkg/querier/plan" "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase" + "github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" ) @@ -328,6 +330,84 @@ func TestInstanceFor(t *testing.T) { ensureParallelism(t, in, in.parallelism) } +func TestParamsToLokiRequest(t *testing.T) { + // Usually, queryrangebase.Request converted into Params and passed to downstream engine + // And converted back to queryrangebase.Request from the params before executing those queries. + // This test makes sure, we don't loose `CachingOption` during this transformation. 
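	// End-to-end, the flow exercised here is: the HTTP codec reads the Cache-Control header,
	// records it on the instant-query request, the request becomes logql.Params, and
	// ParamsToLokiRequest (patched above) copies CachingOptions back so the results cache can
	// honour it. A hedged client-side sketch (URL and label matcher are only examples); note
	// that this patch wires the header through for instant queries only:
	//
	//	req, err := http.NewRequest(http.MethodGet,
	//		`http://localhost:3100/loki/api/v1/query?query={foo="bar"}`, nil)
	//	if err == nil {
	//		req.Header.Set("Cache-Control", "no-cache") // skip the results cache for this query
	//	}
	//
	// logcli exposes the same behaviour through the new --nocache flag added in cmd/logcli/main.go.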
+ + ts := time.Now() + qs := `sum(rate({foo="bar"}[2h] offset 1h))` + + cases := []struct { + name string + caching resultscache.CachingOptions + expReq queryrangebase.Request + }{ + { + "instant-query-cache-enabled", + resultscache.CachingOptions{ + Disabled: false, + }, + &LokiInstantRequest{ + Query: qs, + Limit: 1000, + TimeTs: ts, + Direction: logproto.BACKWARD, + Path: "/loki/api/v1/query", + Shards: nil, + StoreChunks: nil, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(qs), + }, + CachingOptions: resultscache.CachingOptions{ + Disabled: false, + }, + }, + }, + { + "instant-query-cache-disabled", + resultscache.CachingOptions{ + Disabled: true, + }, + &LokiInstantRequest{ + Query: qs, + Limit: 1000, + TimeTs: ts, + Direction: logproto.BACKWARD, + Path: "/loki/api/v1/query", + Shards: nil, + StoreChunks: nil, + Plan: &plan.QueryPlan{ + AST: syntax.MustParseExpr(qs), + }, + CachingOptions: resultscache.CachingOptions{ + Disabled: true, + }, + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + params, err := logql.NewLiteralParamsWithCaching( + `sum(rate({foo="bar"}[2h] offset 1h))`, + ts, + ts, + 0, + 0, + logproto.BACKWARD, + 1000, + nil, + nil, + tc.caching, + ) + require.NoError(t, err) + req := ParamsToLokiRequest(params) + require.Equal(t, tc.expReq, req) + }) + } +} + func TestInstanceDownstream(t *testing.T) { t.Run("Downstream simple query", func(t *testing.T) { ts := time.Unix(1, 0) diff --git a/pkg/querier/queryrange/queryrange.pb.go b/pkg/querier/queryrange/queryrange.pb.go index e295fac92c6d7..ae2dddee539a9 100644 --- a/pkg/querier/queryrange/queryrange.pb.go +++ b/pkg/querier/queryrange/queryrange.pb.go @@ -21,6 +21,7 @@ import ( queryrangebase "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase" _ "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase/definitions" github_com_grafana_loki_v3_pkg_querier_queryrange_queryrangebase_definitions "github.com/grafana/loki/v3/pkg/querier/queryrange/queryrangebase/definitions" + resultscache "github.com/grafana/loki/v3/pkg/storage/chunk/cache/resultscache" io "io" math "math" math_bits "math/bits" @@ -54,7 +55,8 @@ type LokiRequest struct { Plan *github_com_grafana_loki_v3_pkg_querier_plan.QueryPlan `protobuf:"bytes,10,opt,name=plan,proto3,customtype=github.com/grafana/loki/v3/pkg/querier/plan.QueryPlan" json:"plan,omitempty"` // If populated, these represent the chunk references that the querier should // use to fetch the data, plus any other chunks reported by ingesters. 
- StoreChunks *logproto.ChunkRefGroup `protobuf:"bytes,11,opt,name=storeChunks,proto3" json:"storeChunks"` + StoreChunks *logproto.ChunkRefGroup `protobuf:"bytes,11,opt,name=storeChunks,proto3" json:"storeChunks"` + CachingOptions resultscache.CachingOptions `protobuf:"bytes,12,opt,name=cachingOptions,proto3" json:"cachingOptions"` } func (m *LokiRequest) Reset() { *m = LokiRequest{} } @@ -159,6 +161,13 @@ func (m *LokiRequest) GetStoreChunks() *logproto.ChunkRefGroup { return nil } +func (m *LokiRequest) GetCachingOptions() resultscache.CachingOptions { + if m != nil { + return m.CachingOptions + } + return resultscache.CachingOptions{} +} + type LokiInstantRequest struct { Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` Limit uint32 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` @@ -169,7 +178,8 @@ type LokiInstantRequest struct { Plan *github_com_grafana_loki_v3_pkg_querier_plan.QueryPlan `protobuf:"bytes,7,opt,name=plan,proto3,customtype=github.com/grafana/loki/v3/pkg/querier/plan.QueryPlan" json:"plan,omitempty"` // If populated, these represent the chunk references that the querier should // use to fetch the data, plus any other chunks reported by ingesters. - StoreChunks *logproto.ChunkRefGroup `protobuf:"bytes,8,opt,name=storeChunks,proto3" json:"storeChunks"` + StoreChunks *logproto.ChunkRefGroup `protobuf:"bytes,8,opt,name=storeChunks,proto3" json:"storeChunks"` + CachingOptions resultscache.CachingOptions `protobuf:"bytes,9,opt,name=cachingOptions,proto3" json:"cachingOptions"` } func (m *LokiInstantRequest) Reset() { *m = LokiInstantRequest{} } @@ -253,6 +263,13 @@ func (m *LokiInstantRequest) GetStoreChunks() *logproto.ChunkRefGroup { return nil } +func (m *LokiInstantRequest) GetCachingOptions() resultscache.CachingOptions { + if m != nil { + return m.CachingOptions + } + return resultscache.CachingOptions{} +} + type Plan struct { Raw []byte `protobuf:"bytes,1,opt,name=raw,proto3" json:"raw,omitempty"` } @@ -1471,125 +1488,128 @@ func init() { } var fileDescriptor_51b9d53b40d11902 = []byte{ - // 1879 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xcb, 0x6f, 0x1c, 0x49, - 0x19, 0x9f, 0x9e, 0xa7, 0xe7, 0xf3, 0x23, 0xa6, 0x62, 0xbc, 0x8d, 0x77, 0x77, 0x7a, 0x18, 0x89, - 0x5d, 0x83, 0x60, 0x86, 0xd8, 0xbb, 0x61, 0xd7, 0x84, 0x68, 0xd3, 0xeb, 0x04, 0x67, 0xc9, 0x42, - 0xb6, 0x6d, 0x71, 0xe0, 0x82, 0xca, 0x9e, 0xf2, 0x4c, 0xe3, 0x99, 0xee, 0x4e, 0x57, 0x8d, 0x13, - 0x4b, 0x08, 0xed, 0x3f, 0xb0, 0x62, 0x4f, 0xfc, 0x09, 0x88, 0x1b, 0x17, 0x4e, 0x9c, 0x38, 0x86, - 0x03, 0x52, 0x8e, 0xab, 0x91, 0x68, 0x88, 0x83, 0x10, 0xf2, 0x69, 0x25, 0xae, 0x20, 0xa1, 0x7a, - 0x74, 0x4f, 0xd5, 0xcc, 0x98, 0x8c, 0x13, 0x09, 0xc9, 0xc0, 0xc5, 0x53, 0x8f, 0xef, 0xf7, 0x75, - 0xd5, 0xef, 0x7b, 0xd4, 0x57, 0x65, 0x78, 0x33, 0x3a, 0xea, 0xb4, 0x1e, 0x0c, 0x48, 0xec, 0x93, - 0x58, 0xfc, 0x9e, 0xc4, 0x38, 0xe8, 0x10, 0xad, 0xd9, 0x8c, 0xe2, 0x90, 0x85, 0x08, 0x46, 0x23, - 0x6b, 0x1b, 0x1d, 0x9f, 0x75, 0x07, 0xfb, 0xcd, 0x83, 0xb0, 0xdf, 0xea, 0x84, 0x9d, 0xb0, 0xd5, - 0x09, 0xc3, 0x4e, 0x8f, 0xe0, 0xc8, 0xa7, 0xaa, 0xd9, 0x8a, 0xa3, 0x83, 0x16, 0x65, 0x98, 0x0d, - 0xa8, 0xc4, 0xaf, 0xad, 0x70, 0x41, 0xd1, 0x14, 0x10, 0x35, 0xea, 0x28, 0x71, 0xd1, 0xdb, 0x1f, - 0x1c, 0xb6, 0x98, 0xdf, 0x27, 0x94, 0xe1, 0x7e, 0x94, 0x0a, 0xf0, 0xf5, 0xf5, 0xc2, 0x8e, 0x44, - 0xfa, 0x41, 0x9b, 0x3c, 0xea, 0x60, 0x46, 0x1e, 0xe2, 0x13, 0x25, 0xf0, 0xaa, 0x21, 0x90, 0x36, - 0xd4, 0xe4, 0x9a, 0x31, 0x19, 0x61, 
0xc6, 0x48, 0x1c, 0xa8, 0xb9, 0x2f, 0x19, 0x73, 0xf4, 0x88, - 0xb0, 0x83, 0xae, 0x9a, 0xaa, 0xab, 0xa9, 0x07, 0xbd, 0x7e, 0xd8, 0x26, 0x3d, 0xb1, 0x11, 0x2a, - 0xff, 0x2a, 0x89, 0xab, 0x5c, 0x22, 0x1a, 0xd0, 0xae, 0xf8, 0xa3, 0x06, 0xdf, 0x7f, 0x2e, 0x97, - 0xfb, 0x98, 0x92, 0x56, 0x9b, 0x1c, 0xfa, 0x81, 0xcf, 0xfc, 0x30, 0xa0, 0x7a, 0x5b, 0x29, 0xb9, - 0x3e, 0x9b, 0x92, 0x71, 0xfb, 0x34, 0xfe, 0x59, 0x80, 0xf9, 0x7b, 0xe1, 0x91, 0xef, 0x91, 0x07, - 0x03, 0x42, 0x19, 0x5a, 0x81, 0x92, 0x90, 0xb1, 0xad, 0xba, 0xb5, 0x5e, 0xf5, 0x64, 0x87, 0x8f, - 0xf6, 0xfc, 0xbe, 0xcf, 0xec, 0x7c, 0xdd, 0x5a, 0x5f, 0xf4, 0x64, 0x07, 0x21, 0x28, 0x52, 0x46, - 0x22, 0xbb, 0x50, 0xb7, 0xd6, 0x0b, 0x9e, 0x68, 0xa3, 0x35, 0x98, 0xf3, 0x03, 0x46, 0xe2, 0x63, - 0xdc, 0xb3, 0xab, 0x62, 0x3c, 0xeb, 0xa3, 0x9b, 0x50, 0xa1, 0x0c, 0xc7, 0x6c, 0x8f, 0xda, 0xc5, - 0xba, 0xb5, 0x3e, 0xbf, 0xb1, 0xd6, 0x94, 0x76, 0x6c, 0xa6, 0x76, 0x6c, 0xee, 0xa5, 0x76, 0x74, - 0xe7, 0x1e, 0x27, 0x4e, 0xee, 0xd3, 0x3f, 0x39, 0x96, 0x97, 0x82, 0xd0, 0x16, 0x94, 0x48, 0xd0, - 0xde, 0xa3, 0x76, 0xe9, 0x02, 0x68, 0x09, 0x41, 0xd7, 0xa0, 0xda, 0xf6, 0x63, 0x72, 0xc0, 0x39, - 0xb3, 0xcb, 0x75, 0x6b, 0x7d, 0x69, 0xe3, 0x6a, 0x33, 0x33, 0xfb, 0x76, 0x3a, 0xe5, 0x8d, 0xa4, - 0xf8, 0xf6, 0x22, 0xcc, 0xba, 0x76, 0x45, 0x30, 0x21, 0xda, 0xa8, 0x01, 0x65, 0xda, 0xc5, 0x71, - 0x9b, 0xda, 0x73, 0xf5, 0xc2, 0x7a, 0xd5, 0x85, 0xb3, 0xc4, 0x51, 0x23, 0x9e, 0xfa, 0x45, 0x3f, - 0x86, 0x62, 0xd4, 0xc3, 0x81, 0x0d, 0x62, 0x95, 0xcb, 0x4d, 0x8d, 0xf3, 0xfb, 0x3d, 0x1c, 0xb8, - 0xef, 0x0e, 0x13, 0xe7, 0x6d, 0x3d, 0x14, 0x62, 0x7c, 0x88, 0x03, 0xdc, 0xea, 0x85, 0x47, 0x7e, - 0xeb, 0x78, 0xb3, 0xa5, 0x5b, 0x92, 0x2b, 0x6a, 0x7e, 0xc4, 0x15, 0x70, 0xa8, 0x27, 0x14, 0xa3, - 0x0f, 0x60, 0x9e, 0xb2, 0x30, 0x26, 0xef, 0x77, 0x07, 0xc1, 0x11, 0xb5, 0xe7, 0xc5, 0x77, 0x5e, - 0x19, 0xed, 0x46, 0x8c, 0x7b, 0xe4, 0xf0, 0xbb, 0x71, 0x38, 0x88, 0xdc, 0x2b, 0x67, 0x89, 0xa3, - 0xcb, 0x7b, 0x7a, 0xa7, 0xf1, 0x8b, 0x02, 0x20, 0x6e, 0xff, 0xbb, 0x01, 0x65, 0x38, 0x60, 0x2f, - 0xe2, 0x06, 0x37, 0xa0, 0xcc, 0xc3, 0x6f, 0x8f, 0x0a, 0x47, 0x98, 0xd5, 0x2e, 0x0a, 0x63, 0x1a, - 0xa6, 0x78, 0x21, 0xc3, 0x94, 0xa6, 0x1a, 0xa6, 0xfc, 0x5c, 0xc3, 0x54, 0xfe, 0x43, 0x86, 0x99, - 0x7b, 0x19, 0xc3, 0xd8, 0x50, 0xe4, 0x9a, 0xd1, 0x32, 0x14, 0x62, 0xfc, 0x50, 0xd8, 0x61, 0xc1, - 0xe3, 0xcd, 0xc6, 0x69, 0x11, 0x16, 0x64, 0xc8, 0xd2, 0x28, 0x0c, 0x28, 0xe1, 0x7b, 0xdf, 0x15, - 0x39, 0x53, 0x5a, 0x4b, 0xed, 0x5d, 0x8c, 0x78, 0x6a, 0x06, 0xbd, 0x07, 0xc5, 0x6d, 0xcc, 0xb0, - 0xb0, 0xdc, 0xfc, 0xc6, 0x8a, 0xbe, 0x77, 0xae, 0x8b, 0xcf, 0xb9, 0xab, 0xdc, 0x38, 0x67, 0x89, - 0xb3, 0xd4, 0xc6, 0x0c, 0x7f, 0x3d, 0xec, 0xfb, 0x8c, 0xf4, 0x23, 0x76, 0xe2, 0x09, 0x24, 0x7a, - 0x1b, 0xaa, 0xb7, 0xe3, 0x38, 0x8c, 0xf7, 0x4e, 0x22, 0x22, 0x2c, 0x5d, 0x75, 0x5f, 0x39, 0x4b, - 0x9c, 0xab, 0x24, 0x1d, 0xd4, 0x10, 0x23, 0x49, 0xf4, 0x55, 0x28, 0x89, 0x8e, 0xb0, 0x6d, 0xd5, - 0xbd, 0x7a, 0x96, 0x38, 0x57, 0x04, 0x44, 0x13, 0x97, 0x12, 0xa6, 0x2b, 0x94, 0x66, 0x72, 0x85, - 0xcc, 0x23, 0xcb, 0xba, 0x47, 0xda, 0x50, 0x39, 0x26, 0x31, 0xe5, 0x6a, 0x2a, 0x62, 0x3c, 0xed, - 0xa2, 0x5b, 0x00, 0x9c, 0x18, 0x9f, 0x32, 0xff, 0x20, 0x35, 0xd0, 0x62, 0x53, 0xa6, 0x68, 0x8f, - 0xd0, 0x41, 0x8f, 0xb9, 0x48, 0xb1, 0xa0, 0x09, 0x7a, 0x5a, 0x1b, 0xfd, 0xda, 0x82, 0xca, 0x0e, - 0xc1, 0x6d, 0x12, 0x53, 0xbb, 0x5a, 0x2f, 0xac, 0xcf, 0x6f, 0x7c, 0xa5, 0xa9, 0xe7, 0xe3, 0xfb, - 0x71, 0xd8, 0x27, 0xac, 0x4b, 0x06, 0x34, 0x35, 0x90, 0x94, 0x76, 0x83, 0x61, 0xe2, 0x90, 0x19, - 0xdd, 0x6b, 0xa6, 0x63, 0xe0, 0xdc, 0x4f, 0x9d, 0x25, 0x8e, 
0xf5, 0x0d, 0x2f, 0x5d, 0x25, 0xda, - 0x80, 0xb9, 0x87, 0x38, 0x0e, 0xfc, 0xa0, 0x43, 0x6d, 0x10, 0xd1, 0xb1, 0x7a, 0x96, 0x38, 0x28, - 0x1d, 0xd3, 0x0c, 0x91, 0xc9, 0x35, 0xfe, 0x68, 0xc1, 0x17, 0xb8, 0x63, 0xec, 0xf2, 0xf5, 0x50, - 0x2d, 0x2d, 0xf4, 0x31, 0x3b, 0xe8, 0xda, 0x16, 0x57, 0xe3, 0xc9, 0x8e, 0x9e, 0xd7, 0xf3, 0x2f, - 0x95, 0xd7, 0x0b, 0x17, 0xcf, 0xeb, 0x69, 0x2e, 0x28, 0x4e, 0xcd, 0x05, 0xa5, 0xf3, 0x72, 0x41, - 0xe3, 0xe7, 0x2a, 0xef, 0xa5, 0xfb, 0xbb, 0x40, 0x28, 0xdd, 0xc9, 0x42, 0xa9, 0x20, 0x56, 0x9b, - 0x79, 0xa8, 0xd4, 0x75, 0xb7, 0x4d, 0x02, 0xe6, 0x1f, 0xfa, 0x24, 0x7e, 0x4e, 0x40, 0x69, 0x5e, - 0x5a, 0x30, 0xbd, 0x54, 0x77, 0xb1, 0xe2, 0xa5, 0x70, 0x31, 0x33, 0xae, 0x4a, 0x2f, 0x10, 0x57, - 0x8d, 0xbf, 0xe7, 0x61, 0x95, 0x5b, 0xe4, 0x1e, 0xde, 0x27, 0xbd, 0xef, 0xe3, 0xfe, 0x05, 0xad, - 0xf2, 0x86, 0x66, 0x95, 0xaa, 0x8b, 0xfe, 0xcf, 0xfa, 0x6c, 0xac, 0xff, 0xd2, 0x82, 0xb9, 0xf4, - 0x00, 0x40, 0x4d, 0x00, 0x09, 0x13, 0x39, 0x5e, 0x72, 0xbd, 0xc4, 0xc1, 0x71, 0x36, 0xea, 0x69, - 0x12, 0xe8, 0x27, 0x50, 0x96, 0x3d, 0x15, 0x0b, 0xda, 0x51, 0xb7, 0xcb, 0x62, 0x82, 0xfb, 0xb7, - 0xda, 0x38, 0x62, 0x24, 0x76, 0xdf, 0xe5, 0xab, 0x18, 0x26, 0xce, 0x9b, 0xe7, 0xb1, 0x94, 0xd6, - 0xc5, 0x0a, 0xc7, 0xed, 0x2b, 0xbf, 0xe9, 0xa9, 0x2f, 0x34, 0x3e, 0xb1, 0x60, 0x99, 0x2f, 0x94, - 0x53, 0x93, 0x39, 0xc6, 0x36, 0xcc, 0xc5, 0xaa, 0x2d, 0x96, 0x3b, 0xbf, 0xd1, 0x68, 0x9a, 0xb4, - 0x4e, 0xa1, 0xd2, 0x2d, 0x3e, 0x4e, 0x1c, 0xcb, 0xcb, 0x90, 0x68, 0xd3, 0xa0, 0x31, 0x3f, 0x8d, - 0x46, 0x0e, 0xc9, 0x19, 0xc4, 0xfd, 0x2e, 0x0f, 0xe8, 0x2e, 0xbf, 0x57, 0x70, 0xff, 0x1b, 0xb9, - 0xea, 0xa3, 0x89, 0x15, 0xbd, 0x36, 0x22, 0x65, 0x52, 0xde, 0xbd, 0x39, 0x4c, 0x9c, 0xad, 0xe7, - 0xf8, 0xce, 0xbf, 0xc1, 0x6b, 0xbb, 0xd0, 0xdd, 0x37, 0x7f, 0x19, 0xdc, 0xb7, 0xf1, 0x9b, 0x3c, - 0x2c, 0xfd, 0x30, 0xec, 0x0d, 0xfa, 0x24, 0xa3, 0x2f, 0x9a, 0xa0, 0xcf, 0x1e, 0xd1, 0x67, 0xca, - 0xba, 0x5b, 0xc3, 0xc4, 0xb9, 0x3e, 0x2b, 0x75, 0x26, 0xf6, 0x52, 0xd3, 0xf6, 0xd7, 0x3c, 0xac, - 0xec, 0x85, 0xd1, 0xf7, 0x76, 0xc5, 0xdd, 0x53, 0x4b, 0x93, 0xdd, 0x09, 0xf2, 0x56, 0x46, 0xe4, - 0x71, 0xc4, 0x87, 0x98, 0xc5, 0xfe, 0x23, 0xf7, 0xfa, 0x30, 0x71, 0x36, 0x66, 0x25, 0x6e, 0x84, - 0xbb, 0xcc, 0xa4, 0x19, 0x35, 0x50, 0x61, 0xc6, 0x1a, 0xe8, 0x1f, 0x79, 0x58, 0xfd, 0x68, 0x80, - 0x03, 0xe6, 0xf7, 0x88, 0x24, 0x3b, 0xa3, 0xfa, 0xa7, 0x13, 0x54, 0xd7, 0x46, 0x54, 0x9b, 0x18, - 0x45, 0xfa, 0x7b, 0xc3, 0xc4, 0xb9, 0x31, 0x2b, 0xe9, 0xd3, 0x34, 0xfc, 0xcf, 0xd1, 0xff, 0xdb, - 0x3c, 0x2c, 0xed, 0xca, 0xaa, 0x2d, 0xdd, 0xf8, 0xf1, 0x14, 0xda, 0xf5, 0xc7, 0x9d, 0x68, 0xbf, - 0x69, 0x22, 0x2e, 0x96, 0x24, 0x4c, 0xec, 0xa5, 0x4e, 0x12, 0x7f, 0xc8, 0xc3, 0xea, 0x36, 0x61, - 0xe4, 0x80, 0x91, 0xf6, 0x1d, 0x9f, 0xf4, 0x34, 0x12, 0x3f, 0xb6, 0x26, 0x58, 0xac, 0x6b, 0xd7, - 0xac, 0xa9, 0x20, 0xd7, 0x1d, 0x26, 0xce, 0xcd, 0x59, 0x79, 0x9c, 0xae, 0xe3, 0x52, 0xf3, 0xf9, - 0xfb, 0x3c, 0x7c, 0x51, 0x5e, 0xf7, 0xe5, 0x6b, 0xe0, 0x88, 0xce, 0x9f, 0x4d, 0xb0, 0xe9, 0xe8, - 0xa9, 0x60, 0x0a, 0xc4, 0xbd, 0x35, 0x4c, 0x9c, 0xef, 0xcc, 0x9e, 0x0b, 0xa6, 0xa8, 0xf8, 0xaf, - 0xf1, 0x4d, 0x51, 0xed, 0x5f, 0xd4, 0x37, 0x4d, 0xd0, 0x8b, 0xf9, 0xa6, 0xa9, 0xe3, 0x52, 0xf3, - 0xf9, 0x97, 0x32, 0x2c, 0x0a, 0x2f, 0xc9, 0x68, 0xfc, 0x1a, 0xa8, 0xeb, 0x91, 0xe2, 0x10, 0xa5, - 0x57, 0xea, 0x38, 0x3a, 0x68, 0xee, 0xaa, 0x8b, 0x93, 0x94, 0x40, 0xef, 0x40, 0x99, 0x8a, 0x8b, - 0xab, 0xaa, 0x7c, 0x6b, 0xe3, 0x6f, 0x43, 0xe6, 0x15, 0x79, 0x27, 0xe7, 0x29, 0x79, 0x74, 0x03, - 0xca, 0x3d, 0xc1, 0xa2, 0xba, 0xb8, 0x37, 0xc6, 0x91, 0x93, 0x57, 0x39, 0x8e, 0x96, 
0x18, 0x74, - 0x1d, 0x4a, 0xa2, 0xc4, 0x56, 0x6f, 0xc1, 0xc6, 0x67, 0x27, 0x0b, 0xdd, 0x9d, 0x9c, 0x27, 0xc5, - 0xd1, 0x06, 0x14, 0xa3, 0x38, 0xec, 0xab, 0xeb, 0xce, 0x6b, 0xe3, 0xdf, 0xd4, 0xef, 0x07, 0x3b, - 0x39, 0x4f, 0xc8, 0xa2, 0xb7, 0xa0, 0x42, 0xc5, 0xc5, 0x82, 0x8a, 0x87, 0x22, 0x5e, 0x55, 0x8e, - 0xc1, 0x34, 0x48, 0x2a, 0x8a, 0xde, 0x82, 0xf2, 0xb1, 0x28, 0x1b, 0xd5, 0x8b, 0xe1, 0x9a, 0x0e, - 0x32, 0x0b, 0x4a, 0xbe, 0x2f, 0x29, 0x8b, 0xee, 0xc0, 0x02, 0x0b, 0xa3, 0xa3, 0xb4, 0x3a, 0x53, - 0x8f, 0x4c, 0x75, 0x1d, 0x3b, 0xad, 0x7a, 0xdb, 0xc9, 0x79, 0x06, 0x0e, 0xdd, 0x87, 0xe5, 0x07, - 0x46, 0x19, 0x40, 0xa8, 0x78, 0x51, 0x1f, 0xe3, 0x79, 0x7a, 0x81, 0xb2, 0x93, 0xf3, 0x26, 0xd0, - 0x68, 0x1b, 0x96, 0xa8, 0x71, 0xc2, 0xa9, 0x27, 0x6a, 0x63, 0x5f, 0xe6, 0x19, 0xb8, 0x93, 0xf3, - 0xc6, 0x30, 0xe8, 0x1e, 0x2c, 0xb5, 0x8d, 0xfc, 0xae, 0x1e, 0xa0, 0x8d, 0x55, 0x4d, 0x3f, 0x01, - 0xb8, 0x36, 0x13, 0x8b, 0x7e, 0x00, 0xcb, 0xd1, 0x58, 0x6e, 0xb3, 0x17, 0x84, 0xbe, 0x2f, 0x9b, - 0xbb, 0x9c, 0x92, 0x04, 0xf9, 0x26, 0xc7, 0xc1, 0xfa, 0xf2, 0x64, 0x88, 0xdb, 0x8b, 0xe7, 0x2f, - 0xcf, 0x4c, 0x02, 0xfa, 0xf2, 0xe4, 0x8c, 0x0b, 0xa3, 0x74, 0xd4, 0xf8, 0xa4, 0x0c, 0x0b, 0x2a, - 0xcc, 0xe4, 0x6b, 0xd8, 0xb7, 0xb2, 0xc8, 0x91, 0x51, 0xf6, 0xfa, 0x79, 0x91, 0x23, 0xc4, 0xb5, - 0xc0, 0xf9, 0x66, 0x16, 0x38, 0x32, 0xe4, 0x56, 0x47, 0x29, 0x4e, 0x7c, 0x57, 0x43, 0xa8, 0x60, - 0xd9, 0x4c, 0x83, 0x45, 0x46, 0xda, 0xab, 0xd3, 0xef, 0x94, 0x29, 0x4a, 0x45, 0xca, 0x16, 0x54, - 0x7c, 0xf9, 0xac, 0x3f, 0x2d, 0xc6, 0x26, 0x5f, 0xfd, 0xb9, 0xef, 0x2b, 0x00, 0xda, 0x1c, 0x45, - 0x4c, 0x49, 0x3d, 0x63, 0x4f, 0x44, 0x4c, 0x06, 0x4a, 0x03, 0xe6, 0x5a, 0x16, 0x30, 0xe5, 0xf1, - 0xa7, 0xef, 0x34, 0x5c, 0xb2, 0x8d, 0xa9, 0x68, 0xb9, 0x0d, 0x8b, 0xa9, 0x7f, 0x89, 0x29, 0x15, - 0x2e, 0xaf, 0x9f, 0x57, 0xd6, 0xa5, 0x78, 0x13, 0x85, 0xee, 0x4e, 0x38, 0x65, 0x75, 0xfc, 0x28, - 0x1e, 0x77, 0xc9, 0x54, 0xd3, 0xb8, 0x47, 0x7e, 0x00, 0x57, 0x46, 0x4e, 0x25, 0xd7, 0x04, 0x93, - 0x15, 0xbe, 0xe1, 0x8e, 0xa9, 0xaa, 0x71, 0xa0, 0xbe, 0x2c, 0xe5, 0x8c, 0xf3, 0xe7, 0x2d, 0x2b, - 0x75, 0xc5, 0x89, 0x65, 0xc9, 0x09, 0xb4, 0x03, 0x73, 0x7d, 0xc2, 0x70, 0x1b, 0x33, 0x6c, 0x57, - 0xc4, 0xb1, 0xf4, 0xc6, 0x44, 0x80, 0x28, 0x74, 0xf3, 0x43, 0x25, 0x78, 0x3b, 0x60, 0xf1, 0x89, - 0x7a, 0xbb, 0xc8, 0xd0, 0x6b, 0xdf, 0x86, 0x45, 0x43, 0x00, 0x2d, 0x43, 0xe1, 0x88, 0xa4, 0xff, - 0xea, 0xe1, 0x4d, 0xb4, 0x02, 0xa5, 0x63, 0xdc, 0x1b, 0x10, 0xe1, 0x9f, 0x55, 0x4f, 0x76, 0xb6, - 0xf2, 0xef, 0x58, 0x6e, 0x15, 0x2a, 0xb1, 0xfc, 0x8a, 0xdb, 0x79, 0xf2, 0xb4, 0x96, 0xfb, 0xec, - 0x69, 0x2d, 0xf7, 0xf9, 0xd3, 0x9a, 0xf5, 0xf1, 0x69, 0xcd, 0xfa, 0xd5, 0x69, 0xcd, 0x7a, 0x7c, - 0x5a, 0xb3, 0x9e, 0x9c, 0xd6, 0xac, 0x3f, 0x9f, 0xd6, 0xac, 0xbf, 0x9d, 0xd6, 0x72, 0x9f, 0x9f, - 0xd6, 0xac, 0x4f, 0x9f, 0xd5, 0x72, 0x4f, 0x9e, 0xd5, 0x72, 0x9f, 0x3d, 0xab, 0xe5, 0x7e, 0x74, - 0xed, 0xc2, 0x27, 0xe4, 0x7e, 0x59, 0x30, 0xb5, 0xf9, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd2, - 0x2b, 0x01, 0x83, 0x75, 0x1e, 0x00, 0x00, + // 1935 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x59, 0xcb, 0x6f, 0x23, 0x49, + 0x19, 0x77, 0xfb, 0x19, 0x7f, 0x79, 0x4c, 0xa8, 0x09, 0xd9, 0x26, 0x3b, 0xeb, 0x36, 0x96, 0xd8, + 0x0d, 0x08, 0xec, 0x1d, 0x67, 0x77, 0xd8, 0x0d, 0xc3, 0x68, 0xa7, 0x27, 0x33, 0x64, 0x86, 0x59, + 0x76, 0xb6, 0x13, 0x71, 0xe0, 0x82, 0x2a, 0x76, 0xc5, 0x6e, 0x62, 0x77, 0xf7, 0x74, 0x97, 0x33, + 0x13, 0x09, 0xa1, 0xfd, 0x07, 0x56, 0xec, 0x5f, 0x81, 0xb8, 0x71, 0xe1, 0xc4, 0x89, 0xe3, 0xee, + 0x01, 0x69, 0x8e, 
0x2b, 0x4b, 0x34, 0x8c, 0x07, 0x21, 0x94, 0xd3, 0x4a, 0x5c, 0x39, 0xa0, 0x7a, + 0x74, 0xbb, 0xca, 0x76, 0x18, 0x27, 0x20, 0xa4, 0xc0, 0x5e, 0x92, 0x7a, 0x7c, 0xbf, 0xea, 0xaa, + 0xdf, 0xf7, 0xfb, 0xbe, 0x7a, 0x18, 0xde, 0x08, 0x8e, 0x3a, 0x8d, 0xc7, 0x03, 0x12, 0xba, 0x24, + 0xe4, 0xff, 0x4f, 0x42, 0xec, 0x75, 0x88, 0x52, 0xac, 0x07, 0xa1, 0x4f, 0x7d, 0x04, 0xe3, 0x96, + 0x8d, 0x66, 0xc7, 0xa5, 0xdd, 0xc1, 0x41, 0xbd, 0xe5, 0xf7, 0x1b, 0x1d, 0xbf, 0xe3, 0x37, 0x3a, + 0xbe, 0xdf, 0xe9, 0x11, 0x1c, 0xb8, 0x91, 0x2c, 0x36, 0xc2, 0xa0, 0xd5, 0x88, 0x28, 0xa6, 0x83, + 0x48, 0xe0, 0x37, 0xd6, 0x98, 0x21, 0x2f, 0x72, 0x88, 0x6c, 0xb5, 0xa4, 0x39, 0xaf, 0x1d, 0x0c, + 0x0e, 0x1b, 0xd4, 0xed, 0x93, 0x88, 0xe2, 0x7e, 0x90, 0x18, 0xb0, 0xf9, 0xf5, 0xfc, 0x8e, 0x40, + 0xba, 0x5e, 0x9b, 0x3c, 0xed, 0x60, 0x4a, 0x9e, 0xe0, 0x13, 0x69, 0xf0, 0xaa, 0x66, 0x90, 0x14, + 0x64, 0xe7, 0x86, 0xd6, 0x19, 0x60, 0x4a, 0x49, 0xe8, 0xc9, 0xbe, 0xaf, 0x69, 0x7d, 0xd1, 0x11, + 0xa1, 0xad, 0xae, 0xec, 0xaa, 0xca, 0xae, 0xc7, 0xbd, 0xbe, 0xdf, 0x26, 0x3d, 0xbe, 0x90, 0x48, + 0xfc, 0x95, 0x16, 0x57, 0x99, 0x45, 0x30, 0x88, 0xba, 0xfc, 0x8f, 0x6c, 0xbc, 0xf3, 0x52, 0x2e, + 0x0f, 0x70, 0x44, 0x1a, 0x6d, 0x72, 0xe8, 0x7a, 0x2e, 0x75, 0x7d, 0x2f, 0x52, 0xcb, 0x72, 0x90, + 0x1b, 0xf3, 0x0d, 0x32, 0xe9, 0x9f, 0x8d, 0x37, 0x19, 0x2e, 0xa2, 0x7e, 0x88, 0x3b, 0xa4, 0xd1, + 0xea, 0x0e, 0xbc, 0xa3, 0x46, 0x0b, 0xb7, 0xba, 0xa4, 0x11, 0x92, 0x68, 0xd0, 0xa3, 0x91, 0xa8, + 0xd0, 0x93, 0x80, 0xc8, 0x2f, 0xd5, 0x3e, 0xcb, 0xc3, 0xe2, 0x43, 0xff, 0xc8, 0x75, 0xc8, 0xe3, + 0x01, 0x89, 0x28, 0x5a, 0x83, 0x02, 0x1f, 0xd5, 0x34, 0xaa, 0xc6, 0x66, 0xd9, 0x11, 0x15, 0xd6, + 0xda, 0x73, 0xfb, 0x2e, 0x35, 0xb3, 0x55, 0x63, 0x73, 0xd9, 0x11, 0x15, 0x84, 0x20, 0x1f, 0x51, + 0x12, 0x98, 0xb9, 0xaa, 0xb1, 0x99, 0x73, 0x78, 0x19, 0x6d, 0xc0, 0x82, 0xeb, 0x51, 0x12, 0x1e, + 0xe3, 0x9e, 0x59, 0xe6, 0xed, 0x69, 0x1d, 0xdd, 0x82, 0x52, 0x44, 0x71, 0x48, 0xf7, 0x23, 0x33, + 0x5f, 0x35, 0x36, 0x17, 0x9b, 0x1b, 0x75, 0xe1, 0xf9, 0x7a, 0xe2, 0xf9, 0xfa, 0x7e, 0xe2, 0x79, + 0x7b, 0xe1, 0xd3, 0xd8, 0xca, 0x7c, 0xf2, 0x27, 0xcb, 0x70, 0x12, 0x10, 0xda, 0x86, 0x02, 0xf1, + 0xda, 0xfb, 0x91, 0x59, 0x38, 0x07, 0x5a, 0x40, 0xd0, 0x75, 0x28, 0xb7, 0xdd, 0x90, 0xb4, 0x18, + 0xcb, 0x66, 0xb1, 0x6a, 0x6c, 0xae, 0x34, 0xaf, 0xd6, 0x53, 0xa1, 0xec, 0x24, 0x5d, 0xce, 0xd8, + 0x8a, 0x2d, 0x2f, 0xc0, 0xb4, 0x6b, 0x96, 0x38, 0x13, 0xbc, 0x8c, 0x6a, 0x50, 0x8c, 0xba, 0x38, + 0x6c, 0x47, 0xe6, 0x42, 0x35, 0xb7, 0x59, 0xb6, 0xe1, 0x34, 0xb6, 0x64, 0x8b, 0x23, 0xff, 0xa3, + 0x9f, 0x42, 0x3e, 0xe8, 0x61, 0xcf, 0x04, 0x3e, 0xcb, 0xd5, 0xba, 0xe2, 0xa5, 0x47, 0x3d, 0xec, + 0xd9, 0xef, 0x0e, 0x63, 0xeb, 0x6d, 0x35, 0x78, 0x42, 0x7c, 0x88, 0x3d, 0xdc, 0xe8, 0xf9, 0x47, + 0x6e, 0xe3, 0x78, 0xab, 0xa1, 0xfa, 0x9e, 0x0d, 0x54, 0xff, 0x90, 0x0d, 0xc0, 0xa0, 0x0e, 0x1f, + 0x18, 0x3d, 0x80, 0x45, 0xe6, 0x63, 0x72, 0x87, 0x39, 0x38, 0x32, 0x17, 0xf9, 0x77, 0x5e, 0x19, + 0xaf, 0x86, 0xb7, 0x3b, 0xe4, 0xf0, 0x07, 0xa1, 0x3f, 0x08, 0xec, 0x2b, 0xa7, 0xb1, 0xa5, 0xda, + 0x3b, 0x6a, 0x05, 0x3d, 0x80, 0x15, 0x26, 0x0a, 0xd7, 0xeb, 0x7c, 0x10, 0x70, 0x05, 0x9a, 0x4b, + 0x7c, 0xb8, 0x6b, 0x75, 0x55, 0x32, 0xf5, 0x3b, 0x9a, 0x8d, 0x9d, 0x67, 0xf4, 0x3a, 0x13, 0xc8, + 0xda, 0x28, 0x07, 0x88, 0x69, 0xe9, 0xbe, 0x17, 0x51, 0xec, 0xd1, 0x8b, 0x48, 0xea, 0x26, 0x14, + 0x59, 0xf0, 0xef, 0x47, 0x5c, 0x54, 0xf3, 0xfa, 0x58, 0x62, 0x74, 0x27, 0xe7, 0xcf, 0xe5, 0xe4, + 0xc2, 0x4c, 0x27, 0x17, 0x5f, 0xea, 0xe4, 0xd2, 0x7f, 0xc9, 0xc9, 0x0b, 0xff, 0x59, 0x27, 0x97, + 0x2f, 0xec, 0x64, 0x13, 0xf2, 0x6c, 0x96, 
0x68, 0x15, 0x72, 0x21, 0x7e, 0xc2, 0x7d, 0xba, 0xe4, + 0xb0, 0x62, 0x6d, 0x94, 0x87, 0x25, 0x91, 0x4a, 0xa2, 0xc0, 0xf7, 0x22, 0xc2, 0x78, 0xdc, 0xe3, + 0xd9, 0x5f, 0x78, 0x5e, 0xf2, 0xc8, 0x5b, 0x1c, 0xd9, 0x83, 0xde, 0x83, 0xfc, 0x0e, 0xa6, 0x98, + 0xab, 0x60, 0xb1, 0xb9, 0xa6, 0xf2, 0xc8, 0xc6, 0x62, 0x7d, 0xf6, 0x3a, 0x9b, 0xc8, 0x69, 0x6c, + 0xad, 0xb4, 0x31, 0xc5, 0xdf, 0xf6, 0xfb, 0x2e, 0x25, 0xfd, 0x80, 0x9e, 0x38, 0x1c, 0x89, 0xde, + 0x86, 0xf2, 0xdd, 0x30, 0xf4, 0xc3, 0xfd, 0x93, 0x80, 0x70, 0xd5, 0x94, 0xed, 0x57, 0x4e, 0x63, + 0xeb, 0x2a, 0x49, 0x1a, 0x15, 0xc4, 0xd8, 0x12, 0x7d, 0x13, 0x0a, 0xbc, 0xc2, 0x75, 0x52, 0xb6, + 0xaf, 0x9e, 0xc6, 0xd6, 0x15, 0x0e, 0x51, 0xcc, 0x85, 0x85, 0x2e, 0xab, 0xc2, 0x5c, 0xb2, 0x4a, + 0xd5, 0x5d, 0x54, 0xd5, 0x6d, 0x42, 0xe9, 0x98, 0x84, 0x11, 0x1b, 0xa6, 0xc4, 0xdb, 0x93, 0x2a, + 0xba, 0x0d, 0xc0, 0x88, 0x71, 0x23, 0xea, 0xb6, 0x12, 0x67, 0x2f, 0xd7, 0xc5, 0x66, 0xe3, 0x70, + 0x1f, 0xd9, 0x48, 0xb2, 0xa0, 0x18, 0x3a, 0x4a, 0x19, 0xfd, 0xc6, 0x80, 0xd2, 0x2e, 0xc1, 0x6d, + 0x12, 0x32, 0xf7, 0xe6, 0x36, 0x17, 0x9b, 0xdf, 0xa8, 0xab, 0x3b, 0xcb, 0xa3, 0xd0, 0xef, 0x13, + 0xda, 0x25, 0x83, 0x28, 0x71, 0x90, 0xb0, 0xb6, 0xbd, 0x61, 0x6c, 0x91, 0x39, 0xa5, 0x3a, 0xd7, + 0x86, 0x76, 0xe6, 0xa7, 0x4e, 0x63, 0xcb, 0xf8, 0x8e, 0x93, 0xcc, 0x12, 0x35, 0x61, 0xe1, 0x09, + 0x0e, 0x3d, 0xd7, 0xeb, 0x44, 0x26, 0xf0, 0x48, 0x5b, 0x3f, 0x8d, 0x2d, 0x94, 0xb4, 0x29, 0x8e, + 0x48, 0xed, 0x6a, 0x7f, 0x34, 0xe0, 0x2b, 0x4c, 0x18, 0x7b, 0x6c, 0x3e, 0x91, 0x92, 0x62, 0xfa, + 0x98, 0xb6, 0xba, 0xa6, 0xc1, 0x86, 0x71, 0x44, 0x45, 0xdd, 0x6f, 0xb2, 0xff, 0xd6, 0x7e, 0x93, + 0x3b, 0xff, 0x7e, 0x93, 0xe4, 0x95, 0xfc, 0xcc, 0xbc, 0x52, 0x38, 0x2b, 0xaf, 0xd4, 0x7e, 0x29, + 0x73, 0x68, 0xb2, 0xbe, 0x73, 0x84, 0xd2, 0xbd, 0x34, 0x94, 0x72, 0x7c, 0xb6, 0xa9, 0x42, 0xc5, + 0x58, 0xf7, 0xdb, 0xc4, 0xa3, 0xee, 0xa1, 0x4b, 0xc2, 0x97, 0x04, 0x94, 0xa2, 0xd2, 0x9c, 0xae, + 0x52, 0x55, 0x62, 0xf9, 0x4b, 0x21, 0x31, 0x3d, 0xae, 0x0a, 0x17, 0x88, 0xab, 0xda, 0xdf, 0xb3, + 0xb0, 0xce, 0x3c, 0xf2, 0x10, 0x1f, 0x90, 0xde, 0x8f, 0x70, 0xff, 0x9c, 0x5e, 0x79, 0x5d, 0xf1, + 0x4a, 0xd9, 0x46, 0x5f, 0xb2, 0x3e, 0x1f, 0xeb, 0xbf, 0x32, 0x60, 0x21, 0xd9, 0x00, 0x50, 0x1d, + 0x40, 0xc0, 0x78, 0x8e, 0x17, 0x5c, 0xaf, 0x30, 0x70, 0x98, 0xb6, 0x3a, 0x8a, 0x05, 0xfa, 0x19, + 0x14, 0x45, 0x4d, 0xc6, 0x82, 0xb2, 0x6d, 0xee, 0xd1, 0x90, 0xe0, 0xfe, 0xed, 0x36, 0x0e, 0x28, + 0x09, 0xed, 0x77, 0xd9, 0x2c, 0x86, 0xb1, 0xf5, 0xc6, 0x59, 0x2c, 0x25, 0x27, 0x7c, 0x89, 0x63, + 0xfe, 0x15, 0xdf, 0x74, 0xe4, 0x17, 0x6a, 0x1f, 0x1b, 0xb0, 0xca, 0x26, 0xca, 0xa8, 0x49, 0x85, + 0xb1, 0x03, 0x0b, 0xa1, 0x2c, 0xf3, 0xe9, 0x2e, 0x36, 0x6b, 0x75, 0x9d, 0xd6, 0x19, 0x54, 0xf2, + 0x0d, 0xd7, 0x70, 0x52, 0x24, 0xda, 0xd2, 0x68, 0xcc, 0xce, 0xa2, 0x51, 0xec, 0xd1, 0x2a, 0x71, + 0xbf, 0xcf, 0x02, 0xba, 0xcf, 0x6e, 0x48, 0x4c, 0x7f, 0x63, 0xa9, 0x3e, 0x9d, 0x9a, 0xd1, 0xb5, + 0x31, 0x29, 0xd3, 0xf6, 0xf6, 0xad, 0x61, 0x6c, 0x6d, 0xbf, 0x44, 0x3b, 0xff, 0x02, 0xaf, 0xac, + 0x42, 0x95, 0x6f, 0xf6, 0x32, 0xc8, 0xb7, 0xf6, 0xdb, 0x2c, 0xac, 0xfc, 0xd8, 0xef, 0x0d, 0xfa, + 0x24, 0xa5, 0x2f, 0x98, 0xa2, 0xcf, 0x1c, 0xd3, 0xa7, 0xdb, 0xda, 0xdb, 0xc3, 0xd8, 0xba, 0x31, + 0x2f, 0x75, 0x3a, 0xf6, 0x52, 0xd3, 0xf6, 0xd7, 0x2c, 0xac, 0xed, 0xfb, 0xc1, 0x0f, 0xf7, 0xf8, + 0x2d, 0x5a, 0x49, 0x93, 0xdd, 0x29, 0xf2, 0xd6, 0xc6, 0xe4, 0x31, 0xc4, 0xfb, 0x98, 0x86, 0xee, + 0x53, 0xfb, 0xc6, 0x30, 0xb6, 0x9a, 0xf3, 0x12, 0x37, 0xc6, 0x5d, 0x66, 0xd2, 0xb4, 0x33, 0x50, + 0x6e, 0xce, 0x33, 0xd0, 0x3f, 0xb2, 0xb0, 0xfe, 0xe1, 0x00, 0x7b, 
0xd4, 0xed, 0x11, 0x41, 0x76, + 0x4a, 0xf5, 0xcf, 0xa7, 0xa8, 0xae, 0x8c, 0xa9, 0xd6, 0x31, 0x92, 0xf4, 0xf7, 0x86, 0xb1, 0x75, + 0x73, 0x5e, 0xd2, 0x67, 0x8d, 0xf0, 0x7f, 0x47, 0xff, 0xef, 0xb2, 0xb0, 0xb2, 0x27, 0x4e, 0x6d, + 0xc9, 0xc2, 0x8f, 0x67, 0xd0, 0xae, 0x3e, 0x53, 0x05, 0x07, 0x75, 0x1d, 0x71, 0xbe, 0x24, 0xa1, + 0x63, 0x2f, 0x75, 0x92, 0xf8, 0x43, 0x16, 0xd6, 0x77, 0x08, 0x25, 0x2d, 0x4a, 0xda, 0xf7, 0x5c, + 0xd2, 0x53, 0x48, 0xfc, 0xc8, 0x98, 0x62, 0xb1, 0xaa, 0x5c, 0xb3, 0x66, 0x82, 0x6c, 0x7b, 0x18, + 0x5b, 0xb7, 0xe6, 0xe5, 0x71, 0xf6, 0x18, 0x97, 0x9a, 0xcf, 0xcf, 0xb2, 0xf0, 0x55, 0xf1, 0x74, + 0x20, 0xde, 0x35, 0xc7, 0x74, 0xfe, 0x62, 0x8a, 0x4d, 0x4b, 0x4d, 0x05, 0x33, 0x20, 0xf6, 0xed, + 0x61, 0x6c, 0x7d, 0x7f, 0xfe, 0x5c, 0x30, 0x63, 0x88, 0xff, 0x19, 0x6d, 0xf2, 0xd3, 0xfe, 0x79, + 0xb5, 0xa9, 0x83, 0x2e, 0xa6, 0x4d, 0x7d, 0x8c, 0x4b, 0xcd, 0xe7, 0x5f, 0x8a, 0xb0, 0xcc, 0x55, + 0x92, 0xd2, 0xf8, 0x2d, 0x90, 0xd7, 0x23, 0xc9, 0x21, 0x4a, 0xae, 0xd4, 0x61, 0xd0, 0xaa, 0xef, + 0xc9, 0x8b, 0x93, 0xb0, 0x40, 0xef, 0x40, 0x31, 0xe2, 0x17, 0x57, 0x79, 0xf2, 0xad, 0x4c, 0xbe, + 0x0d, 0xe9, 0x57, 0xe4, 0xdd, 0x8c, 0x23, 0xed, 0xd1, 0x4d, 0x28, 0xf6, 0x38, 0x8b, 0xf2, 0xe2, + 0x5e, 0x9b, 0x44, 0x4e, 0x5f, 0xe5, 0x18, 0x5a, 0x60, 0xd0, 0x0d, 0x28, 0xf0, 0x23, 0xb6, 0x7c, + 0xa3, 0xd6, 0x3e, 0x3b, 0x7d, 0xd0, 0xdd, 0xcd, 0x38, 0xc2, 0x1c, 0x35, 0x21, 0x1f, 0x84, 0x7e, + 0x5f, 0x5e, 0x77, 0xae, 0x4d, 0x7e, 0x53, 0xbd, 0x1f, 0xec, 0x66, 0x1c, 0x6e, 0x8b, 0xde, 0x82, + 0x52, 0xc4, 0x2f, 0x16, 0x11, 0x7f, 0x28, 0x62, 0xa7, 0xca, 0x09, 0x98, 0x02, 0x49, 0x4c, 0xd1, + 0x5b, 0x50, 0x3c, 0xe6, 0xc7, 0x46, 0xf9, 0xfa, 0xb8, 0xa1, 0x82, 0xf4, 0x03, 0x25, 0x5b, 0x97, + 0xb0, 0x45, 0xf7, 0x60, 0x89, 0xfa, 0xc1, 0x51, 0x72, 0x3a, 0x93, 0x8f, 0x4c, 0x55, 0x15, 0x3b, + 0xeb, 0xf4, 0xb6, 0x9b, 0x71, 0x34, 0x1c, 0x7a, 0x04, 0xab, 0x8f, 0xb5, 0x63, 0x00, 0x49, 0x9e, + 0x13, 0x35, 0x9e, 0x67, 0x1f, 0x50, 0x76, 0x33, 0xce, 0x14, 0x1a, 0xed, 0xc0, 0x4a, 0xa4, 0xed, + 0x70, 0xf2, 0xe9, 0x5c, 0x5b, 0x97, 0xbe, 0x07, 0xee, 0x66, 0x9c, 0x09, 0x0c, 0x7a, 0x08, 0x2b, + 0x6d, 0x2d, 0xbf, 0xcb, 0x87, 0x71, 0x6d, 0x56, 0xb3, 0x77, 0x00, 0x36, 0x9a, 0x8e, 0x45, 0x1f, + 0xc0, 0x6a, 0x30, 0x91, 0xdb, 0xe4, 0xcb, 0xf8, 0xd7, 0xf5, 0x55, 0xce, 0x48, 0x82, 0x6c, 0x91, + 0x93, 0x60, 0x75, 0x7a, 0x22, 0xc4, 0xcd, 0xe5, 0xb3, 0xa7, 0xa7, 0x27, 0x01, 0x75, 0x7a, 0xa2, + 0xc7, 0x86, 0x71, 0x3a, 0xaa, 0x7d, 0x5c, 0x84, 0x25, 0x19, 0x66, 0xe2, 0x35, 0xec, 0xbb, 0x69, + 0xe4, 0x88, 0x28, 0x7b, 0xed, 0xac, 0xc8, 0xe1, 0xe6, 0x4a, 0xe0, 0xbc, 0x99, 0x06, 0x8e, 0x08, + 0xb9, 0xf5, 0x71, 0x8a, 0xe3, 0xdf, 0x55, 0x10, 0x32, 0x58, 0xb6, 0x92, 0x60, 0x11, 0x91, 0xf6, + 0xea, 0xec, 0x3b, 0x65, 0x82, 0x92, 0x91, 0xb2, 0x0d, 0x25, 0x57, 0xfc, 0x44, 0x30, 0x2b, 0xc6, + 0xa6, 0x7f, 0x41, 0x60, 0xda, 0x97, 0x00, 0xb4, 0x35, 0x8e, 0x98, 0x82, 0x7c, 0x12, 0x9f, 0x8a, + 0x98, 0x14, 0x94, 0x04, 0xcc, 0xf5, 0x34, 0x60, 0x8a, 0x93, 0xcf, 0xe8, 0x49, 0xb8, 0xa4, 0x0b, + 0x93, 0xd1, 0x72, 0x17, 0x96, 0x13, 0x7d, 0xf1, 0x2e, 0x19, 0x2e, 0xaf, 0x9d, 0x75, 0xac, 0x4b, + 0xf0, 0x3a, 0x0a, 0xdd, 0x9f, 0x12, 0x65, 0x79, 0x72, 0x2b, 0x9e, 0x94, 0x64, 0x32, 0xd2, 0xa4, + 0x22, 0x1f, 0xc0, 0x95, 0xb1, 0xa8, 0xc4, 0x9c, 0x60, 0xfa, 0x84, 0xaf, 0xc9, 0x31, 0x19, 0x6a, + 0x12, 0xa8, 0x4e, 0x4b, 0x8a, 0x71, 0xf1, 0xac, 0x69, 0x25, 0x52, 0x9c, 0x9a, 0x96, 0xe8, 0x40, + 0xbb, 0xb0, 0xd0, 0x27, 0x14, 0xb7, 0x31, 0xc5, 0x66, 0x89, 0x6f, 0x4b, 0xaf, 0x4f, 0x05, 0x88, + 0x44, 0xd7, 0xdf, 0x97, 0x86, 0x77, 0x3d, 0x1a, 0x9e, 0xc8, 0xb7, 0x8b, 0x14, 0xbd, 0xf1, 
0x3d, + 0x58, 0xd6, 0x0c, 0xd0, 0x2a, 0xe4, 0x8e, 0x48, 0xf2, 0xb3, 0x11, 0x2b, 0xa2, 0x35, 0x28, 0x1c, + 0xe3, 0xde, 0x80, 0x70, 0x7d, 0x96, 0x1d, 0x51, 0xd9, 0xce, 0xbe, 0x63, 0xd8, 0x65, 0x28, 0x85, + 0xe2, 0x2b, 0x76, 0xe7, 0xd9, 0xf3, 0x4a, 0xe6, 0xf3, 0xe7, 0x95, 0xcc, 0x17, 0xcf, 0x2b, 0xc6, + 0x47, 0xa3, 0x8a, 0xf1, 0xeb, 0x51, 0xc5, 0xf8, 0x74, 0x54, 0x31, 0x9e, 0x8d, 0x2a, 0xc6, 0x9f, + 0x47, 0x15, 0xe3, 0x6f, 0xa3, 0x4a, 0xe6, 0x8b, 0x51, 0xc5, 0xf8, 0xe4, 0x45, 0x25, 0xf3, 0xec, + 0x45, 0x25, 0xf3, 0xf9, 0x8b, 0x4a, 0xe6, 0x27, 0xd7, 0xcf, 0xbd, 0x43, 0x1e, 0x14, 0x39, 0x53, + 0x5b, 0xff, 0x0c, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x35, 0x32, 0x52, 0x3f, 0x1f, 0x00, 0x00, } func (this *LokiRequest) Equal(that interface{}) bool { @@ -1653,6 +1673,9 @@ func (this *LokiRequest) Equal(that interface{}) bool { if !this.StoreChunks.Equal(that1.StoreChunks) { return false } + if !this.CachingOptions.Equal(&that1.CachingOptions) { + return false + } return true } func (this *LokiInstantRequest) Equal(that interface{}) bool { @@ -1707,6 +1730,9 @@ func (this *LokiInstantRequest) Equal(that interface{}) bool { if !this.StoreChunks.Equal(that1.StoreChunks) { return false } + if !this.CachingOptions.Equal(&that1.CachingOptions) { + return false + } return true } func (this *Plan) Equal(that interface{}) bool { @@ -2898,7 +2924,7 @@ func (this *LokiRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 15) + s := make([]string, 0, 16) s = append(s, "&queryrange.LokiRequest{") s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") @@ -2913,6 +2939,7 @@ func (this *LokiRequest) GoString() string { if this.StoreChunks != nil { s = append(s, "StoreChunks: "+fmt.Sprintf("%#v", this.StoreChunks)+",\n") } + s = append(s, "CachingOptions: "+strings.Replace(this.CachingOptions.GoString(), `&`, ``, 1)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -2920,7 +2947,7 @@ func (this *LokiInstantRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 12) + s := make([]string, 0, 13) s = append(s, "&queryrange.LokiInstantRequest{") s = append(s, "Query: "+fmt.Sprintf("%#v", this.Query)+",\n") s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n") @@ -2932,6 +2959,7 @@ func (this *LokiInstantRequest) GoString() string { if this.StoreChunks != nil { s = append(s, "StoreChunks: "+fmt.Sprintf("%#v", this.StoreChunks)+",\n") } + s = append(s, "CachingOptions: "+strings.Replace(this.CachingOptions.GoString(), `&`, ``, 1)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -3370,6 +3398,16 @@ func (m *LokiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.CachingOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 if m.StoreChunks != nil { { size, err := m.StoreChunks.MarshalToSizedBuffer(dAtA[:i]) @@ -3420,21 +3458,21 @@ func (m *LokiRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x30 } - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintQueryrange(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0x2a - n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) + n4, 
err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) if err4 != nil { return 0, err4 } i -= n4 i = encodeVarintQueryrange(dAtA, i, uint64(n4)) i-- + dAtA[i] = 0x2a + n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) + if err5 != nil { + return 0, err5 + } + i -= n5 + i = encodeVarintQueryrange(dAtA, i, uint64(n5)) + i-- dAtA[i] = 0x22 if m.Step != 0 { i = encodeVarintQueryrange(dAtA, i, uint64(m.Step)) @@ -3476,6 +3514,16 @@ func (m *LokiInstantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.CachingOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQueryrange(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a if m.StoreChunks != nil { { size, err := m.StoreChunks.MarshalToSizedBuffer(dAtA[:i]) @@ -3521,12 +3569,12 @@ func (m *LokiInstantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x20 } - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.TimeTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.TimeTs):]) - if err7 != nil { - return 0, err7 + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.TimeTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.TimeTs):]) + if err9 != nil { + return 0, err9 } - i -= n7 - i = encodeVarintQueryrange(dAtA, i, uint64(n7)) + i -= n9 + i = encodeVarintQueryrange(dAtA, i, uint64(n9)) i-- dAtA[i] = 0x1a if m.Limit != 0 { @@ -3712,20 +3760,20 @@ func (m *LokiSeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) - if err10 != nil { - return 0, err10 + n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.EndTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.EndTs):]) + if err12 != nil { + return 0, err12 } - i -= n10 - i = encodeVarintQueryrange(dAtA, i, uint64(n10)) + i -= n12 + i = encodeVarintQueryrange(dAtA, i, uint64(n12)) i-- dAtA[i] = 0x1a - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) - if err11 != nil { - return 0, err11 + n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartTs, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartTs):]) + if err13 != nil { + return 0, err13 } - i -= n11 - i = encodeVarintQueryrange(dAtA, i, uint64(n11)) + i -= n13 + i = encodeVarintQueryrange(dAtA, i, uint64(n13)) i-- dAtA[i] = 0x12 if len(m.Match) > 0 { @@ -4970,6 +5018,8 @@ func (m *LokiRequest) Size() (n int) { l = m.StoreChunks.Size() n += 1 + l + sovQueryrange(uint64(l)) } + l = m.CachingOptions.Size() + n += 1 + l + sovQueryrange(uint64(l)) return n } @@ -5009,6 +5059,8 @@ func (m *LokiInstantRequest) Size() (n int) { l = m.StoreChunks.Size() n += 1 + l + sovQueryrange(uint64(l)) } + l = m.CachingOptions.Size() + n += 1 + l + sovQueryrange(uint64(l)) return n } @@ -5681,6 +5733,7 @@ func (this *LokiRequest) String() string { `Interval:` + fmt.Sprintf("%v", this.Interval) + `,`, `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`, `StoreChunks:` + strings.Replace(fmt.Sprintf("%v", this.StoreChunks), "ChunkRefGroup", "logproto.ChunkRefGroup", 1) + `,`, + `CachingOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), 
"CachingOptions", "resultscache.CachingOptions", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -5698,6 +5751,7 @@ func (this *LokiInstantRequest) String() string { `Shards:` + fmt.Sprintf("%v", this.Shards) + `,`, `Plan:` + fmt.Sprintf("%v", this.Plan) + `,`, `StoreChunks:` + strings.Replace(fmt.Sprintf("%v", this.StoreChunks), "ChunkRefGroup", "logproto.ChunkRefGroup", 1) + `,`, + `CachingOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CachingOptions), "CachingOptions", "resultscache.CachingOptions", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6489,6 +6543,39 @@ func (m *LokiRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachingOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CachingOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQueryrange(dAtA[iNdEx:]) @@ -6781,6 +6868,39 @@ func (m *LokiInstantRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachingOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQueryrange + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQueryrange + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQueryrange + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CachingOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipQueryrange(dAtA[iNdEx:]) diff --git a/pkg/querier/queryrange/queryrange.proto b/pkg/querier/queryrange/queryrange.proto index 46513c9f4e1e1..2169b2da31543 100644 --- a/pkg/querier/queryrange/queryrange.proto +++ b/pkg/querier/queryrange/queryrange.proto @@ -13,6 +13,7 @@ import "pkg/logqlmodel/stats/stats.proto"; import "pkg/push/push.proto"; import "pkg/querier/queryrange/queryrangebase/definitions/definitions.proto"; import "pkg/querier/queryrange/queryrangebase/queryrange.proto"; +import "pkg/storage/chunk/cache/resultscache/types.proto"; option go_package = "github.com/grafana/loki/v3/pkg/querier/queryrange"; option (gogoproto.marshaler_all) = true; @@ -39,6 +40,7 @@ message LokiRequest { // If populated, these represent the chunk references that the querier should // use to fetch the data, plus any other chunks reported by ingesters. logproto.ChunkRefGroup storeChunks = 11 [(gogoproto.jsontag) = "storeChunks"]; + resultscache.CachingOptions cachingOptions = 12 [(gogoproto.nullable) = false]; } message LokiInstantRequest { @@ -55,6 +57,7 @@ message LokiInstantRequest { // If populated, these represent the chunk references that the querier should // use to fetch the data, plus any other chunks reported by ingesters. 
logproto.ChunkRefGroup storeChunks = 8 [(gogoproto.jsontag) = "storeChunks"]; + resultscache.CachingOptions cachingOptions = 9 [(gogoproto.nullable) = false]; } message Plan { diff --git a/pkg/storage/chunk/cache/resultscache/cache_test.go b/pkg/storage/chunk/cache/resultscache/cache_test.go index 964a310f5951f..0febe48020867 100644 --- a/pkg/storage/chunk/cache/resultscache/cache_test.go +++ b/pkg/storage/chunk/cache/resultscache/cache_test.go @@ -682,6 +682,105 @@ func Test_resultsCache_MissingData(t *testing.T) { require.False(t, hit) } +func Test_shouldCacheReq(t *testing.T) { + cfg := Config{ + CacheConfig: cache.Config{ + Cache: cache.NewMockCache(), + }, + } + c, err := cache.New(cfg.CacheConfig, nil, log.NewNopLogger(), stats.ResultCache, constants.Loki) + require.NoError(t, err) + rc := NewResultsCache( + log.NewNopLogger(), + c, + nil, + ConstSplitter(day), + mockLimits{}, + MockMerger{}, + MockExtractor{}, + nil, + nil, + func(_ context.Context, tenantIDs []string, r Request) int { + return 10 + }, + nil, + false, + false, + ) + require.NoError(t, err) + + // create cache with handler + ctx := user.InjectOrgID(context.Background(), "1") + + // create request with start end within the key extents + req := parsedRequest.WithStartEndForCache(time.UnixMilli(50), time.UnixMilli(120)) + + // fill cache + key := ConstSplitter(day).GenerateCacheKey(context.Background(), "1", req) + rc.put(ctx, key, []Extent{mkExtent(50, 120)}) + + // Asserts (when `shouldLookupCache` is non-nil and set to return false (should not cache), resultcache should get result from upstream handler (mockHandler)) + // 1. With `shouldLookupCache` non-nil and set `noCacheReq`, should get result from `next` handler + // 2. With `shouldLookupCache` non-nil and set `cacheReq`, should get result from cache + // 3. With `shouldLookupCache` nil, should get result from cache + + cases := []struct { + name string + shouldLookupCache ShouldCacheReqFn + // expected number of times, upstream `next` handler is called inside results cache + expCount int + }{ + { + name: "don't lookup cache", + shouldLookupCache: noLookupCache, + expCount: 1, + }, + { + name: "lookup cache", + shouldLookupCache: lookupCache, + expCount: 0, + }, + { + name: "nil", + shouldLookupCache: nil, + expCount: 0, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + mh := &mockHandler{} + rc.next = mh + rc.shouldCacheReq = tc.shouldLookupCache + + _, err = rc.Do(ctx, req) + require.NoError(t, err) + require.Equal(t, tc.expCount, mh.called) + + }) + } +} + +type mockHandler struct { + called int + res Response +} + +func (mh *mockHandler) Do(_ context.Context, _ Request) (Response, error) { + mh.called++ + return mh.res, nil +} + +// noLookupCache is of type `ShouldCacheReq` that always returns false (do not cache) +func noLookupCache(_ context.Context, _ Request) bool { + return false +} + +// lookupCache is of type `ShouldCacheReq` that always returns true (cache the result) +func lookupCache(_ context.Context, _ Request) bool { + return true +} + func mkAPIResponse(start, end, step int64) *MockResponse { var samples []*MockSample for i := start; i <= end; i += step { From 8978ecf0c85dfbe18b52632112e5be20eff411cf Mon Sep 17 00:00:00 2001 From: Salva Corts Date: Mon, 20 May 2024 12:36:22 +0200 Subject: [PATCH 36/47] feat: Boilerplate for new bloom build planner and worker components. 
(#12989) --- docs/sources/shared/configuration.md | 10 ++++++ pkg/bloombuild/builder/builder.go | 50 ++++++++++++++++++++++++++++ pkg/bloombuild/builder/config.go | 21 ++++++++++++ pkg/bloombuild/builder/metrics.go | 26 +++++++++++++++ pkg/bloombuild/config.go | 40 ++++++++++++++++++++++ pkg/bloombuild/planner/config.go | 21 ++++++++++++ pkg/bloombuild/planner/metrics.go | 26 +++++++++++++++ pkg/bloombuild/planner/planner.go | 50 ++++++++++++++++++++++++++++ pkg/loki/loki.go | 7 ++++ pkg/loki/modules.go | 34 ++++++++++++++++++- 10 files changed, 284 insertions(+), 1 deletion(-) create mode 100644 pkg/bloombuild/builder/builder.go create mode 100644 pkg/bloombuild/builder/config.go create mode 100644 pkg/bloombuild/builder/metrics.go create mode 100644 pkg/bloombuild/config.go create mode 100644 pkg/bloombuild/planner/config.go create mode 100644 pkg/bloombuild/planner/metrics.go create mode 100644 pkg/bloombuild/planner/planner.go diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index fe57f40daa581..f59f6501c94e5 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -326,6 +326,16 @@ pattern_ingester: # merging them as bloom blocks. [bloom_compactor: ] +bloom_build: + # Flag to enable or disable the usage of the bloom-planner and bloom-builder + # components. + # CLI flag: -bloom-build.enabled + [enabled: | default = false] + + planner: + + builder: + # Experimental: The bloom_gateway block configures the Loki bloom gateway # server, responsible for serving queries for filtering chunks based on filter # expressions. diff --git a/pkg/bloombuild/builder/builder.go b/pkg/bloombuild/builder/builder.go new file mode 100644 index 0000000000000..098e7d6d83f00 --- /dev/null +++ b/pkg/bloombuild/builder/builder.go @@ -0,0 +1,50 @@ +package builder + +import ( + "context" + + "github.com/go-kit/log" + "github.com/grafana/dskit/services" + "github.com/prometheus/client_golang/prometheus" + + utillog "github.com/grafana/loki/v3/pkg/util/log" +) + +type Worker struct { + services.Service + + cfg Config + metrics *Metrics + logger log.Logger +} + +func New( + cfg Config, + logger log.Logger, + r prometheus.Registerer, +) (*Worker, error) { + utillog.WarnExperimentalUse("Bloom Builder", logger) + + w := &Worker{ + cfg: cfg, + metrics: NewMetrics(r), + logger: logger, + } + + w.Service = services.NewBasicService(w.starting, w.running, w.stopping) + return w, nil +} + +func (w *Worker) starting(_ context.Context) (err error) { + w.metrics.running.Set(1) + return err +} + +func (w *Worker) stopping(_ error) error { + w.metrics.running.Set(0) + return nil +} + +func (w *Worker) running(_ context.Context) error { + return nil +} diff --git a/pkg/bloombuild/builder/config.go b/pkg/bloombuild/builder/config.go new file mode 100644 index 0000000000000..ac282ccf95ebb --- /dev/null +++ b/pkg/bloombuild/builder/config.go @@ -0,0 +1,21 @@ +package builder + +import "flag" + +// Config configures the bloom-builder component. +type Config struct { + // TODO: Add config +} + +// RegisterFlagsWithPrefix registers flags for the bloom-planner configuration. 
+func (cfg *Config) RegisterFlagsWithPrefix(_ string, _ *flag.FlagSet) { + // TODO: Register flags with flagsPrefix +} + +func (cfg *Config) Validate() error { + return nil +} + +type Limits interface { + // TODO: Add limits +} diff --git a/pkg/bloombuild/builder/metrics.go b/pkg/bloombuild/builder/metrics.go new file mode 100644 index 0000000000000..e8f46fa025080 --- /dev/null +++ b/pkg/bloombuild/builder/metrics.go @@ -0,0 +1,26 @@ +package builder + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +const ( + metricsNamespace = "loki" + metricsSubsystem = "bloombuilder" +) + +type Metrics struct { + running prometheus.Gauge +} + +func NewMetrics(r prometheus.Registerer) *Metrics { + return &Metrics{ + running: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "running", + Help: "Value will be 1 if the bloom builder is currently running on this instance", + }), + } +} diff --git a/pkg/bloombuild/config.go b/pkg/bloombuild/config.go new file mode 100644 index 0000000000000..c69c605607f5a --- /dev/null +++ b/pkg/bloombuild/config.go @@ -0,0 +1,40 @@ +package bloombuild + +import ( + "flag" + "fmt" + + "github.com/grafana/loki/v3/pkg/bloombuild/builder" + "github.com/grafana/loki/v3/pkg/bloombuild/planner" +) + +// Config configures the bloom-planner component. +type Config struct { + Enabled bool `yaml:"enabled"` + + Planner planner.Config `yaml:"planner"` + Builder builder.Config `yaml:"builder"` +} + +// RegisterFlags registers flags for the bloom building configuration. +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.BoolVar(&cfg.Enabled, "bloom-build.enabled", false, "Flag to enable or disable the usage of the bloom-planner and bloom-builder components.") + cfg.Planner.RegisterFlagsWithPrefix("bloom-build.planner", f) + cfg.Builder.RegisterFlagsWithPrefix("bloom-build.builder", f) +} + +func (cfg *Config) Validate() error { + if !cfg.Enabled { + return nil + } + + if err := cfg.Planner.Validate(); err != nil { + return fmt.Errorf("invalid bloom planner configuration: %w", err) + } + + if err := cfg.Builder.Validate(); err != nil { + return fmt.Errorf("invalid bloom builder configuration: %w", err) + } + + return nil +} diff --git a/pkg/bloombuild/planner/config.go b/pkg/bloombuild/planner/config.go new file mode 100644 index 0000000000000..dd8cb315d9345 --- /dev/null +++ b/pkg/bloombuild/planner/config.go @@ -0,0 +1,21 @@ +package planner + +import "flag" + +// Config configures the bloom-planner component. +type Config struct { + // TODO: Add config +} + +// RegisterFlagsWithPrefix registers flags for the bloom-planner configuration. 
+func (cfg *Config) RegisterFlagsWithPrefix(_ string, _ *flag.FlagSet) { + // TODO: Register flags with flagsPrefix +} + +func (cfg *Config) Validate() error { + return nil +} + +type Limits interface { + // TODO: Add limits +} diff --git a/pkg/bloombuild/planner/metrics.go b/pkg/bloombuild/planner/metrics.go new file mode 100644 index 0000000000000..e9a9035e14df0 --- /dev/null +++ b/pkg/bloombuild/planner/metrics.go @@ -0,0 +1,26 @@ +package planner + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +const ( + metricsNamespace = "loki" + metricsSubsystem = "bloomplanner" +) + +type Metrics struct { + running prometheus.Gauge +} + +func NewMetrics(r prometheus.Registerer) *Metrics { + return &Metrics{ + running: promauto.With(r).NewGauge(prometheus.GaugeOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "running", + Help: "Value will be 1 if bloom planner is currently running on this instance", + }), + } +} diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go new file mode 100644 index 0000000000000..7732d180b0bb8 --- /dev/null +++ b/pkg/bloombuild/planner/planner.go @@ -0,0 +1,50 @@ +package planner + +import ( + "context" + + "github.com/go-kit/log" + "github.com/grafana/dskit/services" + "github.com/prometheus/client_golang/prometheus" + + utillog "github.com/grafana/loki/v3/pkg/util/log" +) + +type Planner struct { + services.Service + + cfg Config + metrics *Metrics + logger log.Logger +} + +func New( + cfg Config, + logger log.Logger, + r prometheus.Registerer, +) (*Planner, error) { + utillog.WarnExperimentalUse("Bloom Planner", logger) + + p := &Planner{ + cfg: cfg, + metrics: NewMetrics(r), + logger: logger, + } + + p.Service = services.NewBasicService(p.starting, p.running, p.stopping) + return p, nil +} + +func (p *Planner) starting(_ context.Context) (err error) { + p.metrics.running.Set(1) + return err +} + +func (p *Planner) stopping(_ error) error { + p.metrics.running.Set(0) + return nil +} + +func (p *Planner) running(_ context.Context) error { + return nil +} diff --git a/pkg/loki/loki.go b/pkg/loki/loki.go index b682c4bfaa65c..9446b351aab82 100644 --- a/pkg/loki/loki.go +++ b/pkg/loki/loki.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/health/grpc_health_v1" "github.com/grafana/loki/v3/pkg/analytics" + "github.com/grafana/loki/v3/pkg/bloombuild" "github.com/grafana/loki/v3/pkg/bloomcompactor" "github.com/grafana/loki/v3/pkg/bloomgateway" "github.com/grafana/loki/v3/pkg/compactor" @@ -90,6 +91,7 @@ type Config struct { Pattern pattern.Config `yaml:"pattern_ingester,omitempty"` IndexGateway indexgateway.Config `yaml:"index_gateway"` BloomCompactor bloomcompactor.Config `yaml:"bloom_compactor,omitempty" category:"experimental"` + BloomBuild bloombuild.Config `yaml:"bloom_build,omitempty" category:"experimental"` BloomGateway bloomgateway.Config `yaml:"bloom_gateway,omitempty" category:"experimental"` StorageConfig storage.Config `yaml:"storage_config,omitempty"` ChunkStoreConfig config.ChunkStoreConfig `yaml:"chunk_store_config,omitempty"` @@ -173,6 +175,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Tracing.RegisterFlags(f) c.CompactorConfig.RegisterFlags(f) c.BloomCompactor.RegisterFlags(f) + c.BloomBuild.RegisterFlags(f) c.QueryScheduler.RegisterFlags(f) c.Analytics.RegisterFlags(f) c.OperationalConfig.RegisterFlags(f) @@ -649,6 +652,8 @@ func (t *Loki) setupModuleManager() error { mm.RegisterModule(BloomStore, 
t.initBloomStore) mm.RegisterModule(BloomCompactor, t.initBloomCompactor) mm.RegisterModule(BloomCompactorRing, t.initBloomCompactorRing, modules.UserInvisibleModule) + mm.RegisterModule(BloomPlanner, t.initBloomPlanner) + mm.RegisterModule(BloomBuilder, t.initBloomBuilder) mm.RegisterModule(IndexGateway, t.initIndexGateway) mm.RegisterModule(IndexGatewayRing, t.initIndexGatewayRing, modules.UserInvisibleModule) mm.RegisterModule(IndexGatewayInterceptors, t.initIndexGatewayInterceptors, modules.UserInvisibleModule) @@ -686,6 +691,8 @@ func (t *Loki) setupModuleManager() error { IndexGateway: {Server, Store, BloomStore, IndexGatewayRing, IndexGatewayInterceptors, Analytics}, BloomGateway: {Server, BloomStore, Analytics}, BloomCompactor: {Server, BloomStore, BloomCompactorRing, Analytics, Store}, + BloomPlanner: {Server, BloomStore, Analytics, Store}, + BloomBuilder: {Server, BloomStore, Analytics, Store}, PatternIngester: {Server, MemberlistKV, Analytics}, PatternRingClient: {Server, MemberlistKV, Analytics}, IngesterQuerier: {Ring}, diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 0280bd514d3c1..a563e80f789fe 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -38,6 +38,8 @@ import ( "github.com/grafana/loki/v3/pkg/storage/types" "github.com/grafana/loki/v3/pkg/analytics" + "github.com/grafana/loki/v3/pkg/bloombuild/builder" + "github.com/grafana/loki/v3/pkg/bloombuild/planner" "github.com/grafana/loki/v3/pkg/bloomgateway" "github.com/grafana/loki/v3/pkg/compactor" compactorclient "github.com/grafana/loki/v3/pkg/compactor/client" @@ -122,6 +124,8 @@ const ( QuerySchedulerRing string = "query-scheduler-ring" BloomCompactor string = "bloom-compactor" BloomCompactorRing string = "bloom-compactor-ring" + BloomPlanner string = "bloom-planner" + BloomBuilder string = "bloom-builder" BloomStore string = "bloom-store" All string = "all" Read string = "read" @@ -803,7 +807,7 @@ func (t *Loki) updateConfigForShipperStore() { t.Cfg.StorageConfig.TSDBShipperConfig.Mode = indexshipper.ModeWriteOnly t.Cfg.StorageConfig.TSDBShipperConfig.IngesterDBRetainPeriod = shipperQuerierIndexUpdateDelay(t.Cfg.StorageConfig.IndexCacheValidity, t.Cfg.StorageConfig.TSDBShipperConfig.ResyncInterval) - case t.Cfg.isTarget(Querier), t.Cfg.isTarget(Ruler), t.Cfg.isTarget(Read), t.Cfg.isTarget(Backend), t.isModuleActive(IndexGateway), t.Cfg.isTarget(BloomCompactor): + case t.Cfg.isTarget(Querier), t.Cfg.isTarget(Ruler), t.Cfg.isTarget(Read), t.Cfg.isTarget(Backend), t.isModuleActive(IndexGateway), t.Cfg.isTarget(BloomCompactor), t.Cfg.isTarget(BloomPlanner), t.Cfg.isTarget(BloomBuilder): // We do not want query to do any updates to index t.Cfg.StorageConfig.BoltDBShipperConfig.Mode = indexshipper.ModeReadOnly t.Cfg.StorageConfig.TSDBShipperConfig.Mode = indexshipper.ModeReadOnly @@ -1553,6 +1557,34 @@ func (t *Loki) initBloomCompactorRing() (services.Service, error) { return t.bloomCompactorRingManager, nil } +func (t *Loki) initBloomPlanner() (services.Service, error) { + if !t.Cfg.BloomBuild.Enabled { + return nil, nil + } + + logger := log.With(util_log.Logger, "component", "bloom-planner") + + return planner.New( + t.Cfg.BloomBuild.Planner, + logger, + prometheus.DefaultRegisterer, + ) +} + +func (t *Loki) initBloomBuilder() (services.Service, error) { + if !t.Cfg.BloomBuild.Enabled { + return nil, nil + } + + logger := log.With(util_log.Logger, "component", "bloom-worker") + + return builder.New( + t.Cfg.BloomBuild.Builder, + logger, + prometheus.DefaultRegisterer, + ) +} + func (t *Loki) 
initQueryScheduler() (services.Service, error) { s, err := scheduler.NewScheduler(t.Cfg.QueryScheduler, t.Overrides, util_log.Logger, t.querySchedulerRingManager, prometheus.DefaultRegisterer, t.Cfg.MetricsNamespace) if err != nil { From 31a13146ed5f631374b7d71d22b219286e3144db Mon Sep 17 00:00:00 2001 From: choeffer Date: Mon, 20 May 2024 16:39:25 +0200 Subject: [PATCH 37/47] docs(install-monolithic): add quotation marks (#12982) Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> --- docs/sources/setup/install/helm/install-monolithic/_index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/setup/install/helm/install-monolithic/_index.md b/docs/sources/setup/install/helm/install-monolithic/_index.md index 86344a9488d77..fd52c11c20034 100644 --- a/docs/sources/setup/install/helm/install-monolithic/_index.md +++ b/docs/sources/setup/install/helm/install-monolithic/_index.md @@ -47,7 +47,7 @@ If you set the `singleBinary.replicas` value to 2 or more, this chart configures type: 'filesystem' schemaConfig: configs: - - from: 2024-01-01 + - from: "2024-01-01" store: tsdb index: prefix: loki_index_ @@ -72,7 +72,7 @@ If you set the `singleBinary.replicas` value to 2 or more, this chart configures replication_factor: 3 schemaConfig: configs: - - from: 2024-01-01 + - from: "2024-01-01" store: tsdb index: prefix: loki_index_ From 94d610e5e0220da1c0bb65bdc9b46ea793dc7387 Mon Sep 17 00:00:00 2001 From: Yarden Shoham Date: Mon, 20 May 2024 18:05:50 +0300 Subject: [PATCH 38/47] docs: Fix broken link in the release notes (#12990) Co-authored-by: J Stickler --- docs/sources/release-notes/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/release-notes/_index.md b/docs/sources/release-notes/_index.md index db74b50c16d76..ebd9afda5afbd 100644 --- a/docs/sources/release-notes/_index.md +++ b/docs/sources/release-notes/_index.md @@ -8,7 +8,7 @@ weight: 100 Release notes for Loki are in the CHANGELOG for the release and listed here by version number. -- [V3.0 release notes](https://grafana.com/docs/loki//release-notes/v3.0/) +- [V3.0 release notes](https://grafana.com/docs/loki//release-notes/v3-0/) - [V2.9 release notes](https://grafana.com/docs/loki//release-notes/v2-9/) - [V2.8 release notes](https://grafana.com/docs/loki//release-notes/v2-8/) - [V2.7 release notes](https://grafana.com/docs/loki//release-notes/v2-7/) From 75ccf2160bfe647b1cb3daffb98869e9c1c44130 Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Mon, 20 May 2024 17:14:40 +0200 Subject: [PATCH 39/47] feat(blooms): Separate page buffer pools for series pages and bloom pages (#12992) Series pages are much smaller than bloom pages and therefore can make use of a separate buffer pool with different buckets. The second commit fixes a possible panic. 
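To illustrate the size-bucketed buffer pool idea behind this change: the sketch below is a minimal, self-contained approximation, not the code in this patch. All names in it (bucketedBytePool, newBucketedBytePool) are hypothetical and it uses sync.Pool directly, whereas the patch below reuses Loki's existing pool helper, simply giving series pages (1KB-128KB) and bloom pages (128KB-128MB) separate bucket ranges so small and large buffers no longer share buckets.

```go
// Illustrative sketch only (not the code in this patch): a size-bucketed
// []byte pool. Buckets double from minSize to maxSize; Get returns a slice
// with capacity >= size from the smallest matching bucket, Put returns it
// for reuse.
package main

import (
	"fmt"
	"sync"
)

type bucketedBytePool struct {
	sizes   []int
	buckets []*sync.Pool
}

func newBucketedBytePool(minSize, maxSize int) *bucketedBytePool {
	p := &bucketedBytePool{}
	for size := minSize; size <= maxSize; size *= 2 {
		size := size // capture for the closure below
		p.sizes = append(p.sizes, size)
		p.buckets = append(p.buckets, &sync.Pool{
			New: func() any { return make([]byte, 0, size) },
		})
	}
	return p
}

// Get returns a zero-length slice whose capacity is at least size.
func (p *bucketedBytePool) Get(size int) []byte {
	for i, s := range p.sizes {
		if size <= s {
			return p.buckets[i].Get().([]byte)[:0]
		}
	}
	// Larger than the biggest bucket: allocate directly, not pooled.
	return make([]byte, 0, size)
}

// Put returns a slice to the bucket matching its capacity, if any.
// (Putting a slice header into sync.Pool allocates; fine for a sketch.)
func (p *bucketedBytePool) Put(b []byte) {
	c := cap(b)
	for i, s := range p.sizes {
		if c == s {
			p.buckets[i].Put(b[:0])
			return
		}
	}
}

func main() {
	// Two pools with different bucket ranges, mirroring the idea of keeping
	// small series pages and large bloom pages out of each other's buckets.
	seriesPages := newBucketedBytePool(1<<10, 128<<10)  // 1KB .. 128KB
	bloomPages := newBucketedBytePool(128<<10, 128<<20) // 128KB .. 128MB

	buf := seriesPages.Get(3 << 10) // served from the 4KB bucket
	fmt.Println(cap(buf))           // 4096
	seriesPages.Put(buf)

	big := bloomPages.Get(1 << 20) // served from the 1MB bucket
	fmt.Println(cap(big))          // 1048576
	bloomPages.Put(big)
}
```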
Signed-off-by: Christian Haudum --- pkg/storage/bloom/v1/bloom.go | 14 +++++++------- pkg/storage/bloom/v1/index.go | 6 +++--- pkg/storage/bloom/v1/util.go | 17 ++++++++++++++--- 3 files changed, 24 insertions(+), 13 deletions(-) diff --git a/pkg/storage/bloom/v1/bloom.go b/pkg/storage/bloom/v1/bloom.go index 2f195e36df2b9..aa51762d4e4ec 100644 --- a/pkg/storage/bloom/v1/bloom.go +++ b/pkg/storage/bloom/v1/bloom.go @@ -24,7 +24,7 @@ type Bloom struct { func (b *Bloom) Encode(enc *encoding.Encbuf) error { // divide by 8 b/c bloom capacity is measured in bits, but we want bytes - buf := bytes.NewBuffer(BlockPool.Get(int(b.Capacity() / 8))) + buf := bytes.NewBuffer(BloomPagePool.Get(int(b.Capacity() / 8))) // TODO(owen-d): have encoder implement writer directly so we don't need // to indirect via a buffer @@ -36,7 +36,7 @@ func (b *Bloom) Encode(enc *encoding.Encbuf) error { data := buf.Bytes() enc.PutUvarint(len(data)) // length of bloom filter enc.PutBytes(data) - BlockPool.Put(data[:0]) // release to pool + BloomPagePool.Put(data[:0]) // release to pool return nil } @@ -65,8 +65,8 @@ func (b *Bloom) Decode(dec *encoding.Decbuf) error { } func LazyDecodeBloomPage(r io.Reader, pool chunkenc.ReaderPool, page BloomPageHeader) (*BloomPageDecoder, error) { - data := BlockPool.Get(page.Len)[:page.Len] - defer BlockPool.Put(data) + data := BloomPagePool.Get(page.Len)[:page.Len] + defer BloomPagePool.Put(data) _, err := io.ReadFull(r, data) if err != nil { @@ -84,7 +84,7 @@ func LazyDecodeBloomPage(r io.Reader, pool chunkenc.ReaderPool, page BloomPageHe } defer pool.PutReader(decompressor) - b := BlockPool.Get(page.DecompressedLen)[:page.DecompressedLen] + b := BloomPagePool.Get(page.DecompressedLen)[:page.DecompressedLen] if _, err = io.ReadFull(decompressor, b); err != nil { return nil, errors.Wrap(err, "decompressing bloom page") @@ -101,7 +101,7 @@ func LazyDecodeBloomPageNoCompression(r io.Reader, page BloomPageHeader) (*Bloom if page.Len != page.DecompressedLen+4 { return nil, errors.New("the Len and DecompressedLen of the page do not match") } - data := BlockPool.Get(page.Len)[:page.Len] + data := BloomPagePool.Get(page.Len)[:page.Len] _, err := io.ReadFull(r, data) if err != nil { @@ -163,7 +163,7 @@ func (d *BloomPageDecoder) Relinquish() { d.data = nil if cap(data) > 0 { - BlockPool.Put(data) + BloomPagePool.Put(data) } } diff --git a/pkg/storage/bloom/v1/index.go b/pkg/storage/bloom/v1/index.go index 674a1a883dfba..a4bcd450650c0 100644 --- a/pkg/storage/bloom/v1/index.go +++ b/pkg/storage/bloom/v1/index.go @@ -155,7 +155,7 @@ func (b *BlockIndex) NewSeriesPageDecoder(r io.ReadSeeker, header SeriesPageHead defer func() { if err != nil { metrics.pagesSkipped.WithLabelValues(pageTypeSeries, skipReasonErr).Inc() - metrics.bytesSkipped.WithLabelValues(pageTypeSeries).Add(float64(header.DecompressedLen)) + metrics.bytesSkipped.WithLabelValues(pageTypeSeries, skipReasonErr).Add(float64(header.DecompressedLen)) } else { metrics.pagesRead.WithLabelValues(pageTypeSeries).Inc() metrics.bytesRead.WithLabelValues(pageTypeSeries).Add(float64(header.DecompressedLen)) @@ -166,8 +166,8 @@ func (b *BlockIndex) NewSeriesPageDecoder(r io.ReadSeeker, header SeriesPageHead return nil, errors.Wrap(err, "seeking to series page") } - data := BlockPool.Get(header.Len)[:header.Len] - defer BlockPool.Put(data) + data := SeriesPagePool.Get(header.Len)[:header.Len] + defer SeriesPagePool.Put(data) _, err = io.ReadFull(r, data) if err != nil { return nil, errors.Wrap(err, "reading series page") diff --git 
a/pkg/storage/bloom/v1/util.go b/pkg/storage/bloom/v1/util.go index 06cf1f6add227..22fb47e43e799 100644 --- a/pkg/storage/bloom/v1/util.go +++ b/pkg/storage/bloom/v1/util.go @@ -32,10 +32,21 @@ var ( }, } - // 4KB -> 128MB - BlockPool = BytePool{ + // buffer pool for series pages + // 1KB 2KB 4KB 8KB 16KB 32KB 64KB 128KB + SeriesPagePool = BytePool{ pool: pool.New( - 4<<10, 128<<20, 2, + 1<<10, 128<<10, 2, + func(size int) interface{} { + return make([]byte, size) + }), + } + + // buffer pool for bloom pages + // 128KB 256KB 512KB 1MB 2MB 4MB 8MB 16MB 32MB 64MB 128MB + BloomPagePool = BytePool{ + pool: pool.New( + 128<<10, 128<<20, 2, func(size int) interface{} { return make([]byte, size) }), From 8442dca9d2341471996a73a011f206630c67e857 Mon Sep 17 00:00:00 2001 From: Jay Clifford <45856600+Jayclifford345@users.noreply.github.com> Date: Mon, 20 May 2024 17:52:17 -0400 Subject: [PATCH 40/47] feat: Added getting started video (#12975) --- docs/sources/get-started/_index.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/sources/get-started/_index.md b/docs/sources/get-started/_index.md index f82d5f9f089c2..c85f383345fbb 100644 --- a/docs/sources/get-started/_index.md +++ b/docs/sources/get-started/_index.md @@ -7,6 +7,8 @@ description: Provides an overview of the steps for implementing Grafana Loki to # Get started with Grafana Loki +{{< youtube id="1uk8LtQqsZQ" >}} + Loki is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by Prometheus. It is designed to be very cost effective and easy to operate. It does not index the contents of the logs, but rather a set of labels for each log stream. Because all Loki implementations are unique, the installation process is From 1f5291a4a3bd3c98c190d9a5dda32bbd78f18c3b Mon Sep 17 00:00:00 2001 From: Ashwanth Date: Tue, 21 May 2024 12:38:02 +0530 Subject: [PATCH 41/47] fix(indexstats): do not collect stats from "IndexStats" lookups for other query types (#12978) --- pkg/ingester/tailer.go | 2 ++ pkg/querier/queryrange/roundtrip.go | 25 ++----------------------- 2 files changed, 4 insertions(+), 23 deletions(-) diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go index b39f42957360b..b2d6d9874ee24 100644 --- a/pkg/ingester/tailer.go +++ b/pkg/ingester/tailer.go @@ -6,6 +6,8 @@ import ( "sync" "time" + "go.uber.org/atomic" + "github.com/go-kit/log/level" "github.com/prometheus/prometheus/model/labels" "go.uber.org/atomic" diff --git a/pkg/querier/queryrange/roundtrip.go b/pkg/querier/queryrange/roundtrip.go index 61da06929fe14..35806abb38d47 100644 --- a/pkg/querier/queryrange/roundtrip.go +++ b/pkg/querier/queryrange/roundtrip.go @@ -274,7 +274,7 @@ func NewMiddleware( seriesRT = seriesTripperware.Wrap(next) labelsRT = labelsTripperware.Wrap(next) instantRT = instantMetricTripperware.Wrap(next) - statsRT = indexStatsTripperware.Wrap(next) + statsRT = base.MergeMiddlewares(StatsCollectorMiddleware(), indexStatsTripperware).Wrap(next) seriesVolumeRT = seriesVolumeTripperware.Wrap(next) detectedFieldsRT = detectedFieldsTripperware.Wrap(next) detectedLabelsRT = detectedLabelsTripperware.Wrap(next) @@ -1055,22 +1055,6 @@ func NewVolumeTripperware(cfg Config, log log.Logger, limits Limits, schema conf ), nil } -func statsTripperware(nextTW base.Middleware) base.Middleware { - return base.MiddlewareFunc(func(next base.Handler) base.Handler { - return base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { - cacheMiddlewares := []base.Middleware{ - StatsCollectorMiddleware(), - 
nextTW, - } - - // wrap nextRT with our new middleware - return base.MergeMiddlewares( - cacheMiddlewares..., - ).Wrap(next).Do(ctx, r) - }) - }) -} - func volumeRangeTripperware(nextTW base.Middleware) base.Middleware { return base.MiddlewareFunc(func(next base.Handler) base.Handler { return base.HandlerFunc(func(ctx context.Context, r base.Request) (base.Response, error) { @@ -1141,7 +1125,7 @@ func NewIndexStatsTripperware(cfg Config, log log.Logger, limits Limits, schema } } - tw, err := sharedIndexTripperware( + return sharedIndexTripperware( cacheMiddleware, cfg, merger, @@ -1152,11 +1136,6 @@ func NewIndexStatsTripperware(cfg Config, log log.Logger, limits Limits, schema schema, metricsNamespace, ) - if err != nil { - return nil, err - } - - return statsTripperware(tw), nil } func sharedIndexTripperware( From bf8a27887979b2337263c55bd23aead9eed6ea0f Mon Sep 17 00:00:00 2001 From: Ashwanth Date: Tue, 21 May 2024 12:56:07 +0530 Subject: [PATCH 42/47] chore: remove duplicate imports (#13001) --- pkg/ingester/tailer.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/pkg/ingester/tailer.go b/pkg/ingester/tailer.go index b2d6d9874ee24..b39f42957360b 100644 --- a/pkg/ingester/tailer.go +++ b/pkg/ingester/tailer.go @@ -6,8 +6,6 @@ import ( "sync" "time" - "go.uber.org/atomic" - "github.com/go-kit/log/level" "github.com/prometheus/prometheus/model/labels" "go.uber.org/atomic" From 7a3338ead82e4c577652ab86e9a55faf200ac05a Mon Sep 17 00:00:00 2001 From: Jonathan Davies Date: Tue, 21 May 2024 10:41:42 +0100 Subject: [PATCH 43/47] feat: loki/main.go: Log which config file path is used on startup (#12985) Co-authored-by: Michel Hollands <42814411+MichelHollands@users.noreply.github.com> --- cmd/loki/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/loki/main.go b/cmd/loki/main.go index d9f4613977872..401085b3aab11 100644 --- a/cmd/loki/main.go +++ b/cmd/loki/main.go @@ -118,6 +118,7 @@ func main() { } level.Info(util_log.Logger).Log("msg", "Starting Loki", "version", version.Info()) + level.Info(util_log.Logger).Log("msg", "Loading configuration file", "filename", config.ConfigFile) err = t.Run(loki.RunOpts{StartTime: startTime}) util_log.CheckFatal("running loki", err, util_log.Logger) From 319503643589163edce0b939de0beac074006a9f Mon Sep 17 00:00:00 2001 From: Salva Corts Date: Tue, 21 May 2024 13:12:24 +0200 Subject: [PATCH 44/47] refactor(bloom planner): Compute gaps and build tasks from metas and TSDBs (#12994) --- docs/sources/shared/configuration.md | 27 ++ pkg/bloombuild/planner/config.go | 29 +- pkg/bloombuild/planner/metrics.go | 36 +++ pkg/bloombuild/planner/planner.go | 407 +++++++++++++++++++++++- pkg/bloombuild/planner/planner_test.go | 321 +++++++++++++++++++ pkg/bloombuild/planner/tableIterator.go | 50 +++ pkg/bloombuild/planner/task.go | 22 ++ pkg/bloombuild/planner/tsdb.go | 261 +++++++++++++++ pkg/bloombuild/planner/tsdb_test.go | 105 ++++++ pkg/bloombuild/planner/util.go | 125 ++++++++ pkg/bloombuild/planner/util_test.go | 172 ++++++++++ pkg/loki/modules.go | 5 + pkg/util/limiter/combined_limits.go | 4 + pkg/validation/limits.go | 14 + 14 files changed, 1568 insertions(+), 10 deletions(-) create mode 100644 pkg/bloombuild/planner/planner_test.go create mode 100644 pkg/bloombuild/planner/tableIterator.go create mode 100644 pkg/bloombuild/planner/task.go create mode 100644 pkg/bloombuild/planner/tsdb.go create mode 100644 pkg/bloombuild/planner/tsdb_test.go create mode 100644 pkg/bloombuild/planner/util.go create mode 100644 pkg/bloombuild/planner/util_test.go 
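The core of this change is computing, per tenant and per day table, which parts of the fingerprint ownership range are not yet covered by existing bloom metas (see gapsBetweenTSDBsAndMetas and FindGapsInFingerprintBounds in the diff below). The following sketch is a minimal illustration of that interval-subtraction idea under simplified assumptions: plain inclusive uint64 bounds instead of Loki's v1.FingerprintBounds, and hypothetical names throughout. It is not the implementation added by this patch.

```go
// Sketch only: subtract a set of covered fingerprint ranges from an
// ownership range, returning the uncovered gaps. Bounds are inclusive
// [min, max] pairs.
package main

import (
	"fmt"
	"sort"
)

type bounds struct{ min, max uint64 } // inclusive on both ends

// gaps returns the parts of ownership not covered by any of the covered ranges.
func gaps(ownership bounds, covered []bounds) []bounds {
	sort.Slice(covered, func(i, j int) bool { return covered[i].min < covered[j].min })

	var out []bounds
	cursor := ownership.min
	for _, c := range covered {
		if c.max < ownership.min || c.min > ownership.max {
			continue // no overlap with the ownership range
		}
		if c.min > cursor {
			out = append(out, bounds{cursor, c.min - 1}) // uncovered stretch before this range
		}
		if c.max+1 > cursor {
			cursor = c.max + 1
		}
		if cursor > ownership.max {
			return out // fully covered from here on
		}
	}
	if cursor <= ownership.max {
		out = append(out, bounds{cursor, ownership.max}) // tail gap
	}
	return out
}

func main() {
	own := bounds{0, 99}
	covered := []bounds{{10, 19}, {40, 59}}
	fmt.Println(gaps(own, covered)) // [{0 9} {20 39} {60 99}]
}
```

In the patch itself, each resulting gap is paired with the overlapping block refs from older metas so the builder can reuse existing blocks instead of re-indexing every chunk (see blockPlansForGaps below).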
diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index f59f6501c94e5..d30dce2f7775b 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -333,6 +333,23 @@ bloom_build: [enabled: | default = false] planner: + # Interval at which to re-run the bloom creation planning. + # CLI flag: -bloom-build.planner.interval + [planning_interval: | default = 8h] + + # Newest day-table offset (from today, inclusive) to build blooms for. + # Increase to lower cost by not re-writing data to object storage too + # frequently since recent data changes more often at the cost of not having + # blooms available as quickly. + # CLI flag: -bloom-build.planner.min-table-offset + [min_table_offset: | default = 1] + + # Oldest day-table offset (from today, inclusive) to compact. This can be + # used to lower cost by not trying to compact older data which doesn't + # change. This can be optimized by aligning it with the maximum + # `reject_old_samples_max_age` setting of any tenant. + # CLI flag: -bloom-build.planner.max-table-offset + [max_table_offset: | default = 2] builder: @@ -3382,6 +3399,16 @@ shard_streams: # CLI flag: -bloom-compactor.max-bloom-size [bloom_compactor_max_bloom_size: | default = 128MB] +# Experimental. Whether to create blooms for the tenant. +# CLI flag: -bloom-build.enable +[bloom_creation_enabled: | default = false] + +# Experimental. Number of splits to create for the series keyspace when building +# blooms. The series keyspace is split into this many parts to parallelize bloom +# creation. +# CLI flag: -bloom-build.split-keyspace-by +[bloom_split_series_keyspace_by: | default = 256] + # Experimental. Length of the n-grams created when computing blooms from log # lines. # CLI flag: -bloom-compactor.ngram-length diff --git a/pkg/bloombuild/planner/config.go b/pkg/bloombuild/planner/config.go index dd8cb315d9345..47b01c0b286e0 100644 --- a/pkg/bloombuild/planner/config.go +++ b/pkg/bloombuild/planner/config.go @@ -1,21 +1,40 @@ package planner -import "flag" +import ( + "flag" + "fmt" + "time" +) // Config configures the bloom-planner component. type Config struct { - // TODO: Add config + PlanningInterval time.Duration `yaml:"planning_interval"` + MinTableOffset int `yaml:"min_table_offset"` + MaxTableOffset int `yaml:"max_table_offset"` } // RegisterFlagsWithPrefix registers flags for the bloom-planner configuration. -func (cfg *Config) RegisterFlagsWithPrefix(_ string, _ *flag.FlagSet) { - // TODO: Register flags with flagsPrefix +func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.DurationVar(&cfg.PlanningInterval, prefix+".interval", 8*time.Hour, "Interval at which to re-run the bloom creation planning.") + f.IntVar(&cfg.MinTableOffset, prefix+".min-table-offset", 1, "Newest day-table offset (from today, inclusive) to build blooms for. Increase to lower cost by not re-writing data to object storage too frequently since recent data changes more often at the cost of not having blooms available as quickly.") + // TODO(owen-d): ideally we'd set this per tenant based on their `reject_old_samples_max_age` setting, + // but due to how we need to discover tenants, we can't do that yet. Tenant+Period discovery is done by + // iterating the table periods in object storage and looking for tenants within that period. + // In order to have this done dynamically, we'd need to account for tenant specific overrides, which are also + // dynamically reloaded. 
+ // I'm doing it the simple way for now. + f.IntVar(&cfg.MaxTableOffset, prefix+".max-table-offset", 2, "Oldest day-table offset (from today, inclusive) to compact. This can be used to lower cost by not trying to compact older data which doesn't change. This can be optimized by aligning it with the maximum `reject_old_samples_max_age` setting of any tenant.") } func (cfg *Config) Validate() error { + if cfg.MinTableOffset > cfg.MaxTableOffset { + return fmt.Errorf("min-table-offset (%d) must be less than or equal to max-table-offset (%d)", cfg.MinTableOffset, cfg.MaxTableOffset) + } + return nil } type Limits interface { - // TODO: Add limits + BloomCreationEnabled(tenantID string) bool + BloomSplitSeriesKeyspaceBy(tenantID string) int } diff --git a/pkg/bloombuild/planner/metrics.go b/pkg/bloombuild/planner/metrics.go index e9a9035e14df0..c0028237d9b1d 100644 --- a/pkg/bloombuild/planner/metrics.go +++ b/pkg/bloombuild/planner/metrics.go @@ -8,10 +8,19 @@ import ( const ( metricsNamespace = "loki" metricsSubsystem = "bloomplanner" + + statusSuccess = "success" + statusFailure = "failure" ) type Metrics struct { running prometheus.Gauge + + buildStarted prometheus.Counter + buildCompleted *prometheus.CounterVec + buildTime *prometheus.HistogramVec + + tenantsDiscovered prometheus.Counter } func NewMetrics(r prometheus.Registerer) *Metrics { @@ -22,5 +31,32 @@ func NewMetrics(r prometheus.Registerer) *Metrics { Name: "running", Help: "Value will be 1 if bloom planner is currently running on this instance", }), + + buildStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "build_started_total", + Help: "Total number of builds started", + }), + buildCompleted: promauto.With(r).NewCounterVec(prometheus.CounterOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "build_completed_total", + Help: "Total number of builds completed", + }, []string{"status"}), + buildTime: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "build_time_seconds", + Help: "Time spent during a builds cycle.", + Buckets: prometheus.DefBuckets, + }, []string{"status"}), + + tenantsDiscovered: promauto.With(r).NewCounter(prometheus.CounterOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "tenants_discovered_total", + Help: "Number of tenants discovered during the current build iteration", + }), } } diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go index 7732d180b0bb8..0be853a2f604a 100644 --- a/pkg/bloombuild/planner/planner.go +++ b/pkg/bloombuild/planner/planner.go @@ -2,33 +2,63 @@ package planner import ( "context" + "fmt" + "sort" + "time" "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/grafana/dskit/services" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/model" + "github.com/grafana/loki/v3/pkg/storage" + v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/config" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" utillog "github.com/grafana/loki/v3/pkg/util/log" ) type Planner struct { services.Service - cfg Config + cfg Config + limits Limits + schemaCfg config.SchemaConfig + + tsdbStore TSDBStore + bloomStore bloomshipper.Store + metrics *Metrics logger log.Logger } func New( cfg Config, + 
limits Limits, + schemaCfg config.SchemaConfig, + storeCfg storage.Config, + storageMetrics storage.ClientMetrics, + bloomStore bloomshipper.Store, logger log.Logger, r prometheus.Registerer, ) (*Planner, error) { utillog.WarnExperimentalUse("Bloom Planner", logger) + tsdbStore, err := NewTSDBStores(schemaCfg, storeCfg, storageMetrics, logger) + if err != nil { + return nil, fmt.Errorf("error creating TSDB store: %w", err) + } + p := &Planner{ - cfg: cfg, - metrics: NewMetrics(r), - logger: logger, + cfg: cfg, + limits: limits, + schemaCfg: schemaCfg, + tsdbStore: tsdbStore, + bloomStore: bloomStore, + metrics: NewMetrics(r), + logger: logger, } p.Service = services.NewBasicService(p.starting, p.running, p.stopping) @@ -45,6 +75,373 @@ func (p *Planner) stopping(_ error) error { return nil } -func (p *Planner) running(_ context.Context) error { +func (p *Planner) running(ctx context.Context) error { + // run once at beginning + if err := p.runOne(ctx); err != nil { + level.Error(p.logger).Log("msg", "bloom build iteration failed for the first time", "err", err) + } + + ticker := time.NewTicker(p.cfg.PlanningInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + err := ctx.Err() + level.Debug(p.logger).Log("msg", "planner context done", "err", err) + return err + + case <-ticker.C: + if err := p.runOne(ctx); err != nil { + level.Error(p.logger).Log("msg", "bloom build iteration failed", "err", err) + } + } + } +} + +func (p *Planner) runOne(ctx context.Context) error { + var ( + start = time.Now() + status = statusFailure + ) + defer func() { + p.metrics.buildCompleted.WithLabelValues(status).Inc() + p.metrics.buildTime.WithLabelValues(status).Observe(time.Since(start).Seconds()) + }() + + p.metrics.buildStarted.Inc() + level.Info(p.logger).Log("msg", "running bloom build planning") + + tables := p.tables(time.Now()) + level.Debug(p.logger).Log("msg", "loaded tables", "tables", tables.TotalDays()) + + work, err := p.loadWork(ctx, tables) + if err != nil { + level.Error(p.logger).Log("msg", "error loading work", "err", err) + return fmt.Errorf("error loading work: %w", err) + } + + // TODO: Enqueue instead of buffering here + // This is just a placeholder for now + var tasks []Task + + for _, w := range work { + gaps, err := p.findGapsForBounds(ctx, w.tenant, w.table, w.ownershipRange) + if err != nil { + level.Error(p.logger).Log("msg", "error finding gaps", "err", err, "tenant", w.tenant, "table", w.table, "ownership", w.ownershipRange.String()) + return fmt.Errorf("error finding gaps for tenant (%s) in table (%s) for bounds (%s): %w", w.tenant, w.table, w.ownershipRange, err) + } + + for _, gap := range gaps { + tasks = append(tasks, Task{ + table: w.table.Addr(), + tenant: w.tenant, + OwnershipBounds: w.ownershipRange, + tsdb: gap.tsdb, + gaps: gap.gaps, + }) + } + } + + status = statusSuccess + level.Info(p.logger).Log( + "msg", "bloom build iteration completed", + "duration", time.Since(start).Seconds(), + "tasks", len(tasks), + ) return nil } + +func (p *Planner) tables(ts time.Time) *dayRangeIterator { + // adjust the minimum by one to make it inclusive, which is more intuitive + // for a configuration variable + adjustedMin := p.cfg.MinTableOffset - 1 + minCompactionDelta := time.Duration(adjustedMin) * config.ObjectStorageIndexRequiredPeriod + maxCompactionDelta := time.Duration(p.cfg.MaxTableOffset) * config.ObjectStorageIndexRequiredPeriod + + from := ts.Add(-maxCompactionDelta).UnixNano() / int64(config.ObjectStorageIndexRequiredPeriod) * 
int64(config.ObjectStorageIndexRequiredPeriod) + through := ts.Add(-minCompactionDelta).UnixNano() / int64(config.ObjectStorageIndexRequiredPeriod) * int64(config.ObjectStorageIndexRequiredPeriod) + + fromDay := config.NewDayTime(model.TimeFromUnixNano(from)) + throughDay := config.NewDayTime(model.TimeFromUnixNano(through)) + level.Debug(p.logger).Log("msg", "loaded tables for compaction", "from", fromDay, "through", throughDay) + return newDayRangeIterator(fromDay, throughDay, p.schemaCfg) +} + +type tenantTableRange struct { + tenant string + table config.DayTable + ownershipRange v1.FingerprintBounds + + // TODO: Add tracking + //finished bool + //queueTime, startTime, endTime time.Time +} + +func (p *Planner) loadWork( + ctx context.Context, + tables *dayRangeIterator, +) ([]tenantTableRange, error) { + var work []tenantTableRange + + for tables.Next() && tables.Err() == nil && ctx.Err() == nil { + table := tables.At() + level.Debug(p.logger).Log("msg", "loading work for table", "table", table) + + tenants, err := p.tenants(ctx, table) + if err != nil { + return nil, fmt.Errorf("error loading tenants: %w", err) + } + level.Debug(p.logger).Log("msg", "loaded tenants", "table", table, "tenants", tenants.Len()) + + for tenants.Next() && tenants.Err() == nil && ctx.Err() == nil { + p.metrics.tenantsDiscovered.Inc() + tenant := tenants.At() + + if !p.limits.BloomCreationEnabled(tenant) { + continue + } + + splitFactor := p.limits.BloomSplitSeriesKeyspaceBy(tenant) + bounds := SplitFingerprintKeyspaceByFactor(splitFactor) + + for _, bounds := range bounds { + work = append(work, tenantTableRange{ + tenant: tenant, + table: table, + ownershipRange: bounds, + }) + } + + level.Debug(p.logger).Log("msg", "loading work for tenant", "table", table, "tenant", tenant, "splitFactor", splitFactor) + } + if err := tenants.Err(); err != nil { + level.Error(p.logger).Log("msg", "error iterating tenants", "err", err) + return nil, fmt.Errorf("error iterating tenants: %w", err) + } + + } + if err := tables.Err(); err != nil { + level.Error(p.logger).Log("msg", "error iterating tables", "err", err) + return nil, fmt.Errorf("error iterating tables: %w", err) + } + + return work, ctx.Err() +} + +func (p *Planner) tenants(ctx context.Context, table config.DayTable) (*v1.SliceIter[string], error) { + tenants, err := p.tsdbStore.UsersForPeriod(ctx, table) + if err != nil { + return nil, fmt.Errorf("error loading tenants for table (%s): %w", table, err) + } + + return v1.NewSliceIter(tenants), nil +} + +/* +Planning works as follows, split across many functions for clarity: + 1. Fetch all meta.jsons for the given tenant and table which overlap the ownership range of this compactor. + 2. Load current TSDBs for this tenant/table. + 3. For each live TSDB (there should be only 1, but this works with multiple), find any gaps + (fingerprint ranges) which are not up-to-date, determined by checking other meta.json files and comparing + the TSDBs they were generated from as well as their ownership ranges. 
+*/ +func (p *Planner) findGapsForBounds( + ctx context.Context, + tenant string, + table config.DayTable, + ownershipRange v1.FingerprintBounds, +) ([]blockPlan, error) { + logger := log.With(p.logger, "org_id", tenant, "table", table.Addr(), "ownership", ownershipRange.String()) + + // Fetch source metas to be used in both build and cleanup of out-of-date metas+blooms + metas, err := p.bloomStore.FetchMetas( + ctx, + bloomshipper.MetaSearchParams{ + TenantID: tenant, + Interval: bloomshipper.NewInterval(table.Bounds()), + Keyspace: ownershipRange, + }, + ) + if err != nil { + level.Error(logger).Log("msg", "failed to get metas", "err", err) + return nil, fmt.Errorf("failed to get metas: %w", err) + } + + level.Debug(logger).Log("msg", "found relevant metas", "metas", len(metas)) + + // Find gaps in the TSDBs for this tenant/table + gaps, err := p.findOutdatedGaps(ctx, tenant, table, ownershipRange, metas, logger) + if err != nil { + return nil, fmt.Errorf("failed to find outdated gaps: %w", err) + } + + return gaps, nil +} + +// blockPlan is a plan for all the work needed to build a meta.json +// It includes: +// - the tsdb (source of truth) which contains all the series+chunks +// we need to ensure are indexed in bloom blocks +// - a list of gaps that are out of date and need to be checked+built +// - within each gap, a list of block refs which overlap the gap are included +// so we can use them to accelerate bloom generation. They likely contain many +// of the same chunks we need to ensure are indexed, just from previous tsdb iterations. +// This is a performance optimization to avoid expensive re-reindexing +type blockPlan struct { + tsdb tsdb.SingleTenantTSDBIdentifier + gaps []GapWithBlocks +} + +func (p *Planner) findOutdatedGaps( + ctx context.Context, + tenant string, + table config.DayTable, + ownershipRange v1.FingerprintBounds, + metas []bloomshipper.Meta, + logger log.Logger, +) ([]blockPlan, error) { + // Resolve TSDBs + tsdbs, err := p.tsdbStore.ResolveTSDBs(ctx, table, tenant) + if err != nil { + level.Error(logger).Log("msg", "failed to resolve tsdbs", "err", err) + return nil, fmt.Errorf("failed to resolve tsdbs: %w", err) + } + + if len(tsdbs) == 0 { + return nil, nil + } + + // Determine which TSDBs have gaps in the ownership range and need to + // be processed. + tsdbsWithGaps, err := gapsBetweenTSDBsAndMetas(ownershipRange, tsdbs, metas) + if err != nil { + level.Error(logger).Log("msg", "failed to find gaps", "err", err) + return nil, fmt.Errorf("failed to find gaps: %w", err) + } + + if len(tsdbsWithGaps) == 0 { + level.Debug(logger).Log("msg", "blooms exist for all tsdbs") + return nil, nil + } + + work, err := blockPlansForGaps(tsdbsWithGaps, metas) + if err != nil { + level.Error(logger).Log("msg", "failed to create plan", "err", err) + return nil, fmt.Errorf("failed to create plan: %w", err) + } + + return work, nil +} + +// Used to signal the gaps that need to be populated for a tsdb +type tsdbGaps struct { + tsdb tsdb.SingleTenantTSDBIdentifier + gaps []v1.FingerprintBounds +} + +// gapsBetweenTSDBsAndMetas returns if the metas are up-to-date with the TSDBs. This is determined by asserting +// that for each TSDB, there are metas covering the entire ownership range which were generated from that specific TSDB. 
+func gapsBetweenTSDBsAndMetas( + ownershipRange v1.FingerprintBounds, + tsdbs []tsdb.SingleTenantTSDBIdentifier, + metas []bloomshipper.Meta, +) (res []tsdbGaps, err error) { + for _, db := range tsdbs { + id := db.Name() + + relevantMetas := make([]v1.FingerprintBounds, 0, len(metas)) + for _, meta := range metas { + for _, s := range meta.Sources { + if s.Name() == id { + relevantMetas = append(relevantMetas, meta.Bounds) + } + } + } + + gaps, err := FindGapsInFingerprintBounds(ownershipRange, relevantMetas) + if err != nil { + return nil, err + } + + if len(gaps) > 0 { + res = append(res, tsdbGaps{ + tsdb: db, + gaps: gaps, + }) + } + } + + return res, err +} + +// blockPlansForGaps groups tsdb gaps we wish to fill with overlapping but out of date blocks. +// This allows us to expedite bloom generation by using existing blocks to fill in the gaps +// since many will contain the same chunks. +func blockPlansForGaps(tsdbs []tsdbGaps, metas []bloomshipper.Meta) ([]blockPlan, error) { + plans := make([]blockPlan, 0, len(tsdbs)) + + for _, idx := range tsdbs { + plan := blockPlan{ + tsdb: idx.tsdb, + gaps: make([]GapWithBlocks, 0, len(idx.gaps)), + } + + for _, gap := range idx.gaps { + planGap := GapWithBlocks{ + bounds: gap, + } + + for _, meta := range metas { + + if meta.Bounds.Intersection(gap) == nil { + // this meta doesn't overlap the gap, skip + continue + } + + for _, block := range meta.Blocks { + if block.Bounds.Intersection(gap) == nil { + // this block doesn't overlap the gap, skip + continue + } + // this block overlaps the gap, add it to the plan + // for this gap + planGap.blocks = append(planGap.blocks, block) + } + } + + // ensure we sort blocks so deduping iterator works as expected + sort.Slice(planGap.blocks, func(i, j int) bool { + return planGap.blocks[i].Bounds.Less(planGap.blocks[j].Bounds) + }) + + peekingBlocks := v1.NewPeekingIter[bloomshipper.BlockRef]( + v1.NewSliceIter[bloomshipper.BlockRef]( + planGap.blocks, + ), + ) + // dedupe blocks which could be in multiple metas + itr := v1.NewDedupingIter[bloomshipper.BlockRef, bloomshipper.BlockRef]( + func(a, b bloomshipper.BlockRef) bool { + return a == b + }, + v1.Identity[bloomshipper.BlockRef], + func(a, _ bloomshipper.BlockRef) bloomshipper.BlockRef { + return a + }, + peekingBlocks, + ) + + deduped, err := v1.Collect[bloomshipper.BlockRef](itr) + if err != nil { + return nil, fmt.Errorf("failed to dedupe blocks: %w", err) + } + planGap.blocks = deduped + + plan.gaps = append(plan.gaps, planGap) + } + + plans = append(plans, plan) + } + + return plans, nil +} diff --git a/pkg/bloombuild/planner/planner_test.go b/pkg/bloombuild/planner/planner_test.go new file mode 100644 index 0000000000000..346bd145ab8dc --- /dev/null +++ b/pkg/bloombuild/planner/planner_test.go @@ -0,0 +1,321 @@ +package planner + +import ( + "testing" + "time" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" +) + +func tsdbID(n int) tsdb.SingleTenantTSDBIdentifier { + return tsdb.SingleTenantTSDBIdentifier{ + TS: time.Unix(int64(n), 0), + } +} + +func genMeta(min, max model.Fingerprint, sources []int, blocks []bloomshipper.BlockRef) bloomshipper.Meta { + m := bloomshipper.Meta{ + MetaRef: bloomshipper.MetaRef{ + Ref: bloomshipper.Ref{ + Bounds: v1.NewBounds(min, max), + }, + }, + Blocks: blocks, + } + for _, 
source := range sources { + m.Sources = append(m.Sources, tsdbID(source)) + } + return m +} + +func Test_gapsBetweenTSDBsAndMetas(t *testing.T) { + + for _, tc := range []struct { + desc string + err bool + exp []tsdbGaps + ownershipRange v1.FingerprintBounds + tsdbs []tsdb.SingleTenantTSDBIdentifier + metas []bloomshipper.Meta + }{ + { + desc: "non-overlapping tsdbs and metas", + err: true, + ownershipRange: v1.NewBounds(0, 10), + tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)}, + metas: []bloomshipper.Meta{ + genMeta(11, 20, []int{0}, nil), + }, + }, + { + desc: "single tsdb", + ownershipRange: v1.NewBounds(0, 10), + tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)}, + metas: []bloomshipper.Meta{ + genMeta(4, 8, []int{0}, nil), + }, + exp: []tsdbGaps{ + { + tsdb: tsdbID(0), + gaps: []v1.FingerprintBounds{ + v1.NewBounds(0, 3), + v1.NewBounds(9, 10), + }, + }, + }, + }, + { + desc: "multiple tsdbs with separate blocks", + ownershipRange: v1.NewBounds(0, 10), + tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0), tsdbID(1)}, + metas: []bloomshipper.Meta{ + genMeta(0, 5, []int{0}, nil), + genMeta(6, 10, []int{1}, nil), + }, + exp: []tsdbGaps{ + { + tsdb: tsdbID(0), + gaps: []v1.FingerprintBounds{ + v1.NewBounds(6, 10), + }, + }, + { + tsdb: tsdbID(1), + gaps: []v1.FingerprintBounds{ + v1.NewBounds(0, 5), + }, + }, + }, + }, + { + desc: "multiple tsdbs with the same blocks", + ownershipRange: v1.NewBounds(0, 10), + tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0), tsdbID(1)}, + metas: []bloomshipper.Meta{ + genMeta(0, 5, []int{0, 1}, nil), + genMeta(6, 8, []int{1}, nil), + }, + exp: []tsdbGaps{ + { + tsdb: tsdbID(0), + gaps: []v1.FingerprintBounds{ + v1.NewBounds(6, 10), + }, + }, + { + tsdb: tsdbID(1), + gaps: []v1.FingerprintBounds{ + v1.NewBounds(9, 10), + }, + }, + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + gaps, err := gapsBetweenTSDBsAndMetas(tc.ownershipRange, tc.tsdbs, tc.metas) + if tc.err { + require.Error(t, err) + return + } + require.Equal(t, tc.exp, gaps) + }) + } +} + +func genBlockRef(min, max model.Fingerprint) bloomshipper.BlockRef { + bounds := v1.NewBounds(min, max) + return bloomshipper.BlockRef{ + Ref: bloomshipper.Ref{ + Bounds: bounds, + }, + } +} + +func Test_blockPlansForGaps(t *testing.T) { + for _, tc := range []struct { + desc string + ownershipRange v1.FingerprintBounds + tsdbs []tsdb.SingleTenantTSDBIdentifier + metas []bloomshipper.Meta + err bool + exp []blockPlan + }{ + { + desc: "single overlapping meta+no overlapping block", + ownershipRange: v1.NewBounds(0, 10), + tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)}, + metas: []bloomshipper.Meta{ + genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(11, 20)}), + }, + exp: []blockPlan{ + { + tsdb: tsdbID(0), + gaps: []GapWithBlocks{ + { + bounds: v1.NewBounds(0, 10), + }, + }, + }, + }, + }, + { + desc: "single overlapping meta+one overlapping block", + ownershipRange: v1.NewBounds(0, 10), + tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)}, + metas: []bloomshipper.Meta{ + genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), + }, + exp: []blockPlan{ + { + tsdb: tsdbID(0), + gaps: []GapWithBlocks{ + { + bounds: v1.NewBounds(0, 10), + blocks: []bloomshipper.BlockRef{genBlockRef(9, 20)}, + }, + }, + }, + }, + }, + { + // the range which needs to be generated doesn't overlap with existing blocks + // from other tsdb versions since theres an up to date tsdb version block, + // but we can trim the range needing generation + desc: "trims up to date area", + 
ownershipRange: v1.NewBounds(0, 10), + tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)}, + metas: []bloomshipper.Meta{ + genMeta(9, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for same tsdb + genMeta(9, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for different tsdb + }, + exp: []blockPlan{ + { + tsdb: tsdbID(0), + gaps: []GapWithBlocks{ + { + bounds: v1.NewBounds(0, 8), + }, + }, + }, + }, + }, + { + desc: "uses old block for overlapping range", + ownershipRange: v1.NewBounds(0, 10), + tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)}, + metas: []bloomshipper.Meta{ + genMeta(9, 20, []int{0}, []bloomshipper.BlockRef{genBlockRef(9, 20)}), // block for same tsdb + genMeta(5, 20, []int{1}, []bloomshipper.BlockRef{genBlockRef(5, 20)}), // block for different tsdb + }, + exp: []blockPlan{ + { + tsdb: tsdbID(0), + gaps: []GapWithBlocks{ + { + bounds: v1.NewBounds(0, 8), + blocks: []bloomshipper.BlockRef{genBlockRef(5, 20)}, + }, + }, + }, + }, + }, + { + desc: "multi case", + ownershipRange: v1.NewBounds(0, 10), + tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0), tsdbID(1)}, // generate for both tsdbs + metas: []bloomshipper.Meta{ + genMeta(0, 2, []int{0}, []bloomshipper.BlockRef{ + genBlockRef(0, 1), + genBlockRef(1, 2), + }), // tsdb_0 + genMeta(6, 8, []int{0}, []bloomshipper.BlockRef{genBlockRef(6, 8)}), // tsdb_0 + + genMeta(3, 5, []int{1}, []bloomshipper.BlockRef{genBlockRef(3, 5)}), // tsdb_1 + genMeta(8, 10, []int{1}, []bloomshipper.BlockRef{genBlockRef(8, 10)}), // tsdb_1 + }, + exp: []blockPlan{ + { + tsdb: tsdbID(0), + gaps: []GapWithBlocks{ + // tsdb (id=0) can source chunks from the blocks built from tsdb (id=1) + { + bounds: v1.NewBounds(3, 5), + blocks: []bloomshipper.BlockRef{genBlockRef(3, 5)}, + }, + { + bounds: v1.NewBounds(9, 10), + blocks: []bloomshipper.BlockRef{genBlockRef(8, 10)}, + }, + }, + }, + // tsdb (id=1) can source chunks from the blocks built from tsdb (id=0) + { + tsdb: tsdbID(1), + gaps: []GapWithBlocks{ + { + bounds: v1.NewBounds(0, 2), + blocks: []bloomshipper.BlockRef{ + genBlockRef(0, 1), + genBlockRef(1, 2), + }, + }, + { + bounds: v1.NewBounds(6, 7), + blocks: []bloomshipper.BlockRef{genBlockRef(6, 8)}, + }, + }, + }, + }, + }, + { + desc: "dedupes block refs", + ownershipRange: v1.NewBounds(0, 10), + tsdbs: []tsdb.SingleTenantTSDBIdentifier{tsdbID(0)}, + metas: []bloomshipper.Meta{ + genMeta(9, 20, []int{1}, []bloomshipper.BlockRef{ + genBlockRef(1, 4), + genBlockRef(9, 20), + }), // blocks for first diff tsdb + genMeta(5, 20, []int{2}, []bloomshipper.BlockRef{ + genBlockRef(5, 10), + genBlockRef(9, 20), // same block references in prior meta (will be deduped) + }), // block for second diff tsdb + }, + exp: []blockPlan{ + { + tsdb: tsdbID(0), + gaps: []GapWithBlocks{ + { + bounds: v1.NewBounds(0, 10), + blocks: []bloomshipper.BlockRef{ + genBlockRef(1, 4), + genBlockRef(5, 10), + genBlockRef(9, 20), + }, + }, + }, + }, + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + // we reuse the gapsBetweenTSDBsAndMetas function to generate the gaps as this function is tested + // separately and it's used to generate input in our regular code path (easier to write tests this way). 
+ gaps, err := gapsBetweenTSDBsAndMetas(tc.ownershipRange, tc.tsdbs, tc.metas) + require.NoError(t, err) + + plans, err := blockPlansForGaps(gaps, tc.metas) + if tc.err { + require.Error(t, err) + return + } + require.Equal(t, tc.exp, plans) + + }) + } +} diff --git a/pkg/bloombuild/planner/tableIterator.go b/pkg/bloombuild/planner/tableIterator.go new file mode 100644 index 0000000000000..c17458a04806c --- /dev/null +++ b/pkg/bloombuild/planner/tableIterator.go @@ -0,0 +1,50 @@ +package planner + +import ( + "fmt" + + "github.com/grafana/loki/v3/pkg/storage/config" +) + +type dayRangeIterator struct { + min, max, cur config.DayTime + curPeriod config.PeriodConfig + schemaCfg config.SchemaConfig + err error +} + +func newDayRangeIterator(min, max config.DayTime, schemaCfg config.SchemaConfig) *dayRangeIterator { + return &dayRangeIterator{min: min, max: max, cur: min.Dec(), schemaCfg: schemaCfg} +} + +func (r *dayRangeIterator) TotalDays() int { + offset := r.cur + if r.cur.Before(r.min) { + offset = r.min + } + return int(r.max.Sub(offset.Time) / config.ObjectStorageIndexRequiredPeriod) +} + +func (r *dayRangeIterator) Next() bool { + r.cur = r.cur.Inc() + if !r.cur.Before(r.max) { + return false + } + + period, err := r.schemaCfg.SchemaForTime(r.cur.ModelTime()) + if err != nil { + r.err = fmt.Errorf("getting schema for time (%s): %w", r.cur, err) + return false + } + r.curPeriod = period + + return true +} + +func (r *dayRangeIterator) At() config.DayTable { + return config.NewDayTable(r.cur, r.curPeriod.IndexTables.Prefix) +} + +func (r *dayRangeIterator) Err() error { + return nil +} diff --git a/pkg/bloombuild/planner/task.go b/pkg/bloombuild/planner/task.go new file mode 100644 index 0000000000000..80f730c4fb6dd --- /dev/null +++ b/pkg/bloombuild/planner/task.go @@ -0,0 +1,22 @@ +package planner + +import ( + v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" +) + +// TODO: Extract this definiton to a proto file at pkg/bloombuild/protos/protos.proto + +type GapWithBlocks struct { + bounds v1.FingerprintBounds + blocks []bloomshipper.BlockRef +} + +type Task struct { + table string + tenant string + OwnershipBounds v1.FingerprintBounds + tsdb tsdb.SingleTenantTSDBIdentifier + gaps []GapWithBlocks +} diff --git a/pkg/bloombuild/planner/tsdb.go b/pkg/bloombuild/planner/tsdb.go new file mode 100644 index 0000000000000..7c15c43306db2 --- /dev/null +++ b/pkg/bloombuild/planner/tsdb.go @@ -0,0 +1,261 @@ +package planner + +import ( + "context" + "fmt" + "io" + "math" + "path" + "strings" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + + "github.com/grafana/loki/v3/pkg/chunkenc" + baseStore "github.com/grafana/loki/v3/pkg/storage" + v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/config" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/storage" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/sharding" + "github.com/grafana/loki/v3/pkg/storage/types" +) + +const ( + gzipExtension = ".gz" +) + +type TSDBStore interface { + UsersForPeriod(ctx context.Context, table 
config.DayTable) ([]string, error) + ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) + LoadTSDB( + ctx context.Context, + table config.DayTable, + tenant string, + id tsdb.Identifier, + bounds v1.FingerprintBounds, + ) (v1.Iterator[*v1.Series], error) +} + +// BloomTSDBStore is a wrapper around the storage.Client interface which +// implements the TSDBStore interface for this pkg. +type BloomTSDBStore struct { + storage storage.Client + logger log.Logger +} + +func NewBloomTSDBStore(storage storage.Client, logger log.Logger) *BloomTSDBStore { + return &BloomTSDBStore{ + storage: storage, + logger: logger, + } +} + +func (b *BloomTSDBStore) UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error) { + _, users, err := b.storage.ListFiles(ctx, table.Addr(), true) // bypass cache for ease of testing + return users, err +} + +func (b *BloomTSDBStore) ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) { + indices, err := b.storage.ListUserFiles(ctx, table.Addr(), tenant, true) // bypass cache for ease of testing + if err != nil { + return nil, errors.Wrap(err, "failed to list user files") + } + + ids := make([]tsdb.SingleTenantTSDBIdentifier, 0, len(indices)) + for _, index := range indices { + key := index.Name + if decompress := storage.IsCompressedFile(index.Name); decompress { + key = strings.TrimSuffix(key, gzipExtension) + } + + id, ok := tsdb.ParseSingleTenantTSDBPath(path.Base(key)) + if !ok { + return nil, errors.Errorf("failed to parse single tenant tsdb path: %s", key) + } + + ids = append(ids, id) + + } + return ids, nil +} + +func (b *BloomTSDBStore) LoadTSDB( + ctx context.Context, + table config.DayTable, + tenant string, + id tsdb.Identifier, + bounds v1.FingerprintBounds, +) (v1.Iterator[*v1.Series], error) { + withCompression := id.Name() + gzipExtension + + data, err := b.storage.GetUserFile(ctx, table.Addr(), tenant, withCompression) + if err != nil { + return nil, errors.Wrap(err, "failed to get file") + } + defer data.Close() + + decompressorPool := chunkenc.GetReaderPool(chunkenc.EncGZIP) + decompressor, err := decompressorPool.GetReader(data) + if err != nil { + return nil, errors.Wrap(err, "failed to get decompressor") + } + defer decompressorPool.PutReader(decompressor) + + buf, err := io.ReadAll(decompressor) + if err != nil { + return nil, errors.Wrap(err, "failed to read file") + } + + reader, err := index.NewReader(index.RealByteSlice(buf)) + if err != nil { + return nil, errors.Wrap(err, "failed to create index reader") + } + + idx := tsdb.NewTSDBIndex(reader) + defer func() { + if err := idx.Close(); err != nil { + level.Error(b.logger).Log("msg", "failed to close index", "err", err) + } + }() + + return NewTSDBSeriesIter(ctx, tenant, idx, bounds) +} + +func NewTSDBSeriesIter(ctx context.Context, user string, f sharding.ForSeries, bounds v1.FingerprintBounds) (v1.Iterator[*v1.Series], error) { + // TODO(salvacorts): Create a pool + series := make([]*v1.Series, 0, 100) + + if err := f.ForSeries( + ctx, + user, + bounds, + 0, math.MaxInt64, + func(_ labels.Labels, fp model.Fingerprint, chks []index.ChunkMeta) (stop bool) { + select { + case <-ctx.Done(): + return true + default: + res := &v1.Series{ + Fingerprint: fp, + Chunks: make(v1.ChunkRefs, 0, len(chks)), + } + for _, chk := range chks { + res.Chunks = append(res.Chunks, v1.ChunkRef{ + From: model.Time(chk.MinTime), + Through: model.Time(chk.MaxTime), + 
Checksum: chk.Checksum, + }) + } + + series = append(series, res) + return false + } + }, + labels.MustNewMatcher(labels.MatchEqual, "", ""), + ); err != nil { + return nil, err + } + + select { + case <-ctx.Done(): + return v1.NewEmptyIter[*v1.Series](), ctx.Err() + default: + return v1.NewCancelableIter[*v1.Series](ctx, v1.NewSliceIter[*v1.Series](series)), nil + } +} + +type TSDBStores struct { + schemaCfg config.SchemaConfig + stores []TSDBStore +} + +func NewTSDBStores( + schemaCfg config.SchemaConfig, + storeCfg baseStore.Config, + clientMetrics baseStore.ClientMetrics, + logger log.Logger, +) (*TSDBStores, error) { + res := &TSDBStores{ + schemaCfg: schemaCfg, + stores: make([]TSDBStore, len(schemaCfg.Configs)), + } + + for i, cfg := range schemaCfg.Configs { + if cfg.IndexType == types.TSDBType { + + c, err := baseStore.NewObjectClient(cfg.ObjectType, storeCfg, clientMetrics) + if err != nil { + return nil, errors.Wrap(err, "failed to create object client") + } + res.stores[i] = NewBloomTSDBStore(storage.NewIndexStorageClient(c, cfg.IndexTables.PathPrefix), logger) + } + } + + return res, nil +} + +func (s *TSDBStores) storeForPeriod(table config.DayTime) (TSDBStore, error) { + for i := len(s.schemaCfg.Configs) - 1; i >= 0; i-- { + period := s.schemaCfg.Configs[i] + + if !table.Before(period.From) { + // we have the desired period config + + if s.stores[i] != nil { + // valid: it's of tsdb type + return s.stores[i], nil + } + + // invalid + return nil, errors.Errorf( + "store for period is not of TSDB type (%s) while looking up store for (%v)", + period.IndexType, + table, + ) + } + + } + + return nil, fmt.Errorf( + "there is no store matching no matching period found for table (%v) -- too early", + table, + ) +} + +func (s *TSDBStores) UsersForPeriod(ctx context.Context, table config.DayTable) ([]string, error) { + store, err := s.storeForPeriod(table.DayTime) + if err != nil { + return nil, err + } + + return store.UsersForPeriod(ctx, table) +} + +func (s *TSDBStores) ResolveTSDBs(ctx context.Context, table config.DayTable, tenant string) ([]tsdb.SingleTenantTSDBIdentifier, error) { + store, err := s.storeForPeriod(table.DayTime) + if err != nil { + return nil, err + } + + return store.ResolveTSDBs(ctx, table, tenant) +} + +func (s *TSDBStores) LoadTSDB( + ctx context.Context, + table config.DayTable, + tenant string, + id tsdb.Identifier, + bounds v1.FingerprintBounds, +) (v1.Iterator[*v1.Series], error) { + store, err := s.storeForPeriod(table.DayTime) + if err != nil { + return nil, err + } + + return store.LoadTSDB(ctx, table, tenant, id, bounds) +} diff --git a/pkg/bloombuild/planner/tsdb_test.go b/pkg/bloombuild/planner/tsdb_test.go new file mode 100644 index 0000000000000..f47c193c2cd18 --- /dev/null +++ b/pkg/bloombuild/planner/tsdb_test.go @@ -0,0 +1,105 @@ +package planner + +import ( + "context" + "math" + "testing" + + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" + + v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" + "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb/index" +) + +type forSeriesTestImpl []*v1.Series + +func (f forSeriesTestImpl) ForSeries( + _ context.Context, + _ string, + _ index.FingerprintFilter, + _ model.Time, + _ model.Time, + fn func(labels.Labels, model.Fingerprint, []index.ChunkMeta) bool, + _ ...*labels.Matcher, +) error { + for i := range f { + unmapped := make([]index.ChunkMeta, 0, len(f[i].Chunks)) + for _, c := range f[i].Chunks { 
+ unmapped = append(unmapped, index.ChunkMeta{ + MinTime: int64(c.From), + MaxTime: int64(c.Through), + Checksum: c.Checksum, + }) + } + + fn(nil, f[i].Fingerprint, unmapped) + } + return nil +} + +func (f forSeriesTestImpl) Close() error { + return nil +} + +func TestTSDBSeriesIter(t *testing.T) { + input := []*v1.Series{ + { + Fingerprint: 1, + Chunks: []v1.ChunkRef{ + { + From: 0, + Through: 1, + Checksum: 2, + }, + { + From: 3, + Through: 4, + Checksum: 5, + }, + }, + }, + } + srcItr := v1.NewSliceIter(input) + itr, err := NewTSDBSeriesIter(context.Background(), "", forSeriesTestImpl(input), v1.NewBounds(0, math.MaxUint64)) + require.NoError(t, err) + + v1.EqualIterators[*v1.Series]( + t, + func(a, b *v1.Series) { + require.Equal(t, a, b) + }, + itr, + srcItr, + ) +} + +func TestTSDBSeriesIter_Expiry(t *testing.T) { + t.Run("expires on creation", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + itr, err := NewTSDBSeriesIter(ctx, "", forSeriesTestImpl{ + {}, // a single entry + }, v1.NewBounds(0, math.MaxUint64)) + require.Error(t, err) + require.False(t, itr.Next()) + }) + + t.Run("expires during consumption", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + itr, err := NewTSDBSeriesIter(ctx, "", forSeriesTestImpl{ + {}, + {}, + }, v1.NewBounds(0, math.MaxUint64)) + require.NoError(t, err) + + require.True(t, itr.Next()) + require.NoError(t, itr.Err()) + + cancel() + require.False(t, itr.Next()) + require.Error(t, itr.Err()) + }) + +} diff --git a/pkg/bloombuild/planner/util.go b/pkg/bloombuild/planner/util.go new file mode 100644 index 0000000000000..f9a97587f802f --- /dev/null +++ b/pkg/bloombuild/planner/util.go @@ -0,0 +1,125 @@ +package planner + +import ( + "fmt" + "math" + + "github.com/prometheus/common/model" + + v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" +) + +// SplitFingerprintKeyspaceByFactor splits the keyspace covered by model.Fingerprint into contiguous non-overlapping ranges. +func SplitFingerprintKeyspaceByFactor(factor int) []v1.FingerprintBounds { + if factor <= 0 { + return nil + } + + bounds := make([]v1.FingerprintBounds, 0, factor) + + // The keyspace of a Fingerprint is from 0 to max uint64. + keyspaceSize := uint64(math.MaxUint64) + + // Calculate the size of each range. + rangeSize := keyspaceSize / uint64(factor) + + for i := 0; i < factor; i++ { + // Calculate the start and end of the range. + start := uint64(i) * rangeSize + end := start + rangeSize - 1 + + // For the last range, make sure it ends at the end of the keyspace. + if i == factor-1 { + end = keyspaceSize + } + + // Create a FingerprintBounds for the range and add it to the slice. + bounds = append(bounds, v1.FingerprintBounds{ + Min: model.Fingerprint(start), + Max: model.Fingerprint(end), + }) + } + + return bounds +} + +func FindGapsInFingerprintBounds(ownershipRange v1.FingerprintBounds, metas []v1.FingerprintBounds) (gaps []v1.FingerprintBounds, err error) { + if len(metas) == 0 { + return []v1.FingerprintBounds{ownershipRange}, nil + } + + // turn the available metas into a list of non-overlapping metas + // for easier processing + var nonOverlapping []v1.FingerprintBounds + // First, we reduce the metas into a smaller set by combining overlaps. They must be sorted. + var cur *v1.FingerprintBounds + for i := 0; i < len(metas); i++ { + j := i + 1 + + // first iteration (i == 0), set the current meta + if cur == nil { + cur = &metas[i] + } + + if j >= len(metas) { + // We've reached the end of the list. 
Add the last meta to the non-overlapping set. + nonOverlapping = append(nonOverlapping, *cur) + break + } + + combined := cur.Union(metas[j]) + if len(combined) == 1 { + // There was an overlap between the two tested ranges. Combine them and keep going. + cur = &combined[0] + continue + } + + // There was no overlap between the two tested ranges. Add the first to the non-overlapping set. + // and keep the second for the next iteration. + nonOverlapping = append(nonOverlapping, combined[0]) + cur = &combined[1] + } + + // Now, detect gaps between the non-overlapping metas and the ownership range. + // The left bound of the ownership range will be adjusted as we go. + leftBound := ownershipRange.Min + for _, meta := range nonOverlapping { + + clippedMeta := meta.Intersection(ownershipRange) + // should never happen as long as we are only combining metas + // that intersect with the ownership range + if clippedMeta == nil { + return nil, fmt.Errorf("meta is not within ownership range: %v", meta) + } + + searchRange := ownershipRange.Slice(leftBound, clippedMeta.Max) + // update the left bound for the next iteration + // We do the max to prevent the max bound to overflow from MaxUInt64 to 0 + leftBound = min( + max(clippedMeta.Max+1, clippedMeta.Max), + max(ownershipRange.Max+1, ownershipRange.Max), + ) + + // since we've already ensured that the meta is within the ownership range, + // we know the xor will be of length zero (when the meta is equal to the ownership range) + // or 1 (when the meta is a subset of the ownership range) + xors := searchRange.Unless(*clippedMeta) + if len(xors) == 0 { + // meta is equal to the ownership range. This means the meta + // covers this entire section of the ownership range. + continue + } + + gaps = append(gaps, xors[0]) + } + + // If the leftBound is less than the ownership range max, and it's smaller than MaxUInt64, + // There is a gap between the last meta and the end of the ownership range. + // Note: we check `leftBound < math.MaxUint64` since in the loop above we clamp the + // leftBound to MaxUint64 to prevent an overflow to 0: `max(clippedMeta.Max+1, clippedMeta.Max)` + if leftBound < math.MaxUint64 && leftBound <= ownershipRange.Max { + gaps = append(gaps, v1.NewBounds(leftBound, ownershipRange.Max)) + } + + return gaps, nil +} diff --git a/pkg/bloombuild/planner/util_test.go b/pkg/bloombuild/planner/util_test.go new file mode 100644 index 0000000000000..6755478ef7290 --- /dev/null +++ b/pkg/bloombuild/planner/util_test.go @@ -0,0 +1,172 @@ +package planner + +import ( + "math" + "testing" + + "github.com/prometheus/common/model" + "github.com/stretchr/testify/require" + + v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" +) + +func TestSplitFingerprintKeyspaceByFactor(t *testing.T) { + for _, tt := range []struct { + name string + factor int + }{ + { + name: "Factor is 0", + factor: 0, + }, + { + name: "Factor is 1", + factor: 1, + }, + { + name: "Factor is 256", + factor: 256, + }, + } { + t.Run(tt.name, func(t *testing.T) { + got := SplitFingerprintKeyspaceByFactor(tt.factor) + + if tt.factor == 0 { + require.Empty(t, got) + return + } + + // Check overall min and max values of the ranges. + require.Equal(t, model.Fingerprint(math.MaxUint64), got[len(got)-1].Max) + require.Equal(t, model.Fingerprint(0), got[0].Min) + + // For each range, check that the max value of the previous range is one less than the min value of the current range. 
+ for i := 1; i < len(got); i++ { + require.Equal(t, got[i-1].Max+1, got[i].Min) + } + }) + } +} + +func Test_FindGapsInFingerprintBounds(t *testing.T) { + for _, tc := range []struct { + desc string + err bool + exp []v1.FingerprintBounds + ownershipRange v1.FingerprintBounds + metas []v1.FingerprintBounds + }{ + { + desc: "error nonoverlapping metas", + err: true, + exp: nil, + ownershipRange: v1.NewBounds(0, 10), + metas: []v1.FingerprintBounds{v1.NewBounds(11, 20)}, + }, + { + desc: "one meta with entire ownership range", + err: false, + exp: nil, + ownershipRange: v1.NewBounds(0, 10), + metas: []v1.FingerprintBounds{v1.NewBounds(0, 10)}, + }, + { + desc: "two non-overlapping metas with entire ownership range", + err: false, + exp: nil, + ownershipRange: v1.NewBounds(0, 10), + metas: []v1.FingerprintBounds{ + v1.NewBounds(0, 5), + v1.NewBounds(6, 10), + }, + }, + { + desc: "two overlapping metas with entire ownership range", + err: false, + exp: nil, + ownershipRange: v1.NewBounds(0, 10), + metas: []v1.FingerprintBounds{ + v1.NewBounds(0, 6), + v1.NewBounds(4, 10), + }, + }, + { + desc: "one meta with partial ownership range", + err: false, + exp: []v1.FingerprintBounds{ + v1.NewBounds(6, 10), + }, + ownershipRange: v1.NewBounds(0, 10), + metas: []v1.FingerprintBounds{ + v1.NewBounds(0, 5), + }, + }, + { + desc: "smaller subsequent meta with partial ownership range", + err: false, + exp: []v1.FingerprintBounds{ + v1.NewBounds(8, 10), + }, + ownershipRange: v1.NewBounds(0, 10), + metas: []v1.FingerprintBounds{ + v1.NewBounds(0, 7), + v1.NewBounds(3, 4), + }, + }, + { + desc: "hole in the middle", + err: false, + exp: []v1.FingerprintBounds{ + v1.NewBounds(4, 5), + }, + ownershipRange: v1.NewBounds(0, 10), + metas: []v1.FingerprintBounds{ + v1.NewBounds(0, 3), + v1.NewBounds(6, 10), + }, + }, + { + desc: "holes on either end", + err: false, + exp: []v1.FingerprintBounds{ + v1.NewBounds(0, 2), + v1.NewBounds(8, 10), + }, + ownershipRange: v1.NewBounds(0, 10), + metas: []v1.FingerprintBounds{ + v1.NewBounds(3, 5), + v1.NewBounds(6, 7), + }, + }, + { + desc: "full ownership range with single meta", + err: false, + exp: nil, + ownershipRange: v1.NewBounds(0, math.MaxUint64), + metas: []v1.FingerprintBounds{ + v1.NewBounds(0, math.MaxUint64), + }, + }, + { + desc: "full ownership range with multiple metas", + err: false, + exp: nil, + ownershipRange: v1.NewBounds(0, math.MaxUint64), + // Three metas covering the whole 0 - MaxUint64 + metas: []v1.FingerprintBounds{ + v1.NewBounds(0, math.MaxUint64/3), + v1.NewBounds(math.MaxUint64/3+1, math.MaxUint64/2), + v1.NewBounds(math.MaxUint64/2+1, math.MaxUint64), + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + gaps, err := FindGapsInFingerprintBounds(tc.ownershipRange, tc.metas) + if tc.err { + require.Error(t, err) + return + } + require.Equal(t, tc.exp, gaps) + }) + } +} diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index a563e80f789fe..e73369aca2d72 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -1566,6 +1566,11 @@ func (t *Loki) initBloomPlanner() (services.Service, error) { return planner.New( t.Cfg.BloomBuild.Planner, + t.Overrides, + t.Cfg.SchemaConfig, + t.Cfg.StorageConfig, + t.ClientMetrics, + t.BloomStore, logger, prometheus.DefaultRegisterer, ) diff --git a/pkg/util/limiter/combined_limits.go b/pkg/util/limiter/combined_limits.go index 39684c7b43e8e..92caf2c19d681 100644 --- a/pkg/util/limiter/combined_limits.go +++ b/pkg/util/limiter/combined_limits.go @@ -1,6 +1,8 @@ package limiter import ( + bloombuilder 
"github.com/grafana/loki/v3/pkg/bloombuild/builder" + bloomplanner "github.com/grafana/loki/v3/pkg/bloombuild/planner" "github.com/grafana/loki/v3/pkg/bloomcompactor" "github.com/grafana/loki/v3/pkg/bloomgateway" "github.com/grafana/loki/v3/pkg/compactor" @@ -26,4 +28,6 @@ type CombinedLimits interface { indexgateway.Limits bloomgateway.Limits bloomcompactor.Limits + bloomplanner.Limits + bloombuilder.Limits } diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index ca33d1f4bf425..b0660686f5c1b 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -205,6 +205,9 @@ type Limits struct { BloomCompactorMaxBlockSize flagext.ByteSize `yaml:"bloom_compactor_max_block_size" json:"bloom_compactor_max_block_size" category:"experimental"` BloomCompactorMaxBloomSize flagext.ByteSize `yaml:"bloom_compactor_max_bloom_size" json:"bloom_compactor_max_bloom_size" category:"experimental"` + BloomCreationEnabled bool `yaml:"bloom_creation_enabled" json:"bloom_creation_enabled" category:"experimental"` + BloomSplitSeriesKeyspaceBy int `yaml:"bloom_split_series_keyspace_by" json:"bloom_split_series_keyspace_by" category:"experimental"` + BloomNGramLength int `yaml:"bloom_ngram_length" json:"bloom_ngram_length" category:"experimental"` BloomNGramSkip int `yaml:"bloom_ngram_skip" json:"bloom_ngram_skip" category:"experimental"` BloomFalsePositiveRate float64 `yaml:"bloom_false_positive_rate" json:"bloom_false_positive_rate" category:"experimental"` @@ -380,6 +383,9 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { ), ) + f.BoolVar(&l.BloomCreationEnabled, "bloom-build.enable", false, "Experimental. Whether to create blooms for the tenant.") + f.IntVar(&l.BloomSplitSeriesKeyspaceBy, "bloom-build.split-keyspace-by", 256, "Experimental. Number of splits to create for the series keyspace when building blooms. 
The series keyspace is split into this many parts to parallelize bloom creation.")
+
 	_ = l.BloomCompactorMaxBloomSize.Set(defaultBloomCompactorMaxBloomSize)
 	f.Var(&l.BloomCompactorMaxBloomSize, "bloom-compactor.max-bloom-size",
 		fmt.Sprintf(
@@ -973,6 +979,14 @@ func (o *Overrides) BloomCompactorEnabled(userID string) bool {
 	return o.getOverridesForUser(userID).BloomCompactorEnabled
 }
 
+func (o *Overrides) BloomCreationEnabled(userID string) bool {
+	return o.getOverridesForUser(userID).BloomCreationEnabled
+}
+
+func (o *Overrides) BloomSplitSeriesKeyspaceBy(userID string) int {
+	return o.getOverridesForUser(userID).BloomSplitSeriesKeyspaceBy
+}
+
 func (o *Overrides) BloomNGramLength(userID string) int {
 	return o.getOverridesForUser(userID).BloomNGramLength
 }

From d6f29fc789760318b048d005e14c91eba748b45e Mon Sep 17 00:00:00 2001
From: Vitor Gomes <41302394+vitoorgomes@users.noreply.github.com>
Date: Wed, 22 May 2024 04:34:42 +1200
Subject: [PATCH 45/47] docs: update otlp ingestion with correct endpoint and add endpoint to reference api docs (#12996)

---
 docs/sources/reference/loki-http-api.md | 11 +++++++++++
 docs/sources/send-data/otel/_index.md | 4 ++--
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/docs/sources/reference/loki-http-api.md b/docs/sources/reference/loki-http-api.md
index 1f85a4b48bbca..e72a03ba51b0a 100644
--- a/docs/sources/reference/loki-http-api.md
+++ b/docs/sources/reference/loki-http-api.md
@@ -24,6 +24,7 @@ Authorization needs to be done separately, for example, using an open-source loa
 These endpoints are exposed by the `distributor`, `write`, and `all` components:
 
 - [`POST /loki/api/v1/push`](#ingest-logs)
+- [`POST /otlp/v1/logs`](#ingest-logs-using-otlp)
 
 A [list of clients]({{< relref "../send-data" >}}) can be found in the clients documentation.
 
@@ -260,6 +261,16 @@ curl -H "Content-Type: application/json" \
   --data-raw '{"streams": [{ "stream": { "foo": "bar2" }, "values": [ [ "1570818238000000000", "fizzbuzz" ] ] }]}'
 ```
 
+## Ingest logs using OTLP
+
+```bash
+POST /otlp/v1/logs
+```
+
+`/otlp/v1/logs` lets the OpenTelemetry Collector send logs to Loki using the `otlphttp` protocol.
+
+For information on how to configure Loki, refer to the [OTel Collector topic](https://grafana.com/docs/loki//send-data/otel/).
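If a runnable end-to-end check is useful next to the curl examples in this document, a hypothetical Go smoke test for the new endpoint could look like the sketch below. The localhost address, the sample OTLP/JSON payload, the assumption that the endpoint accepts JSON-encoded OTLP in addition to the protobuf that the collector's `otlphttp` exporter sends, and running without multi-tenancy (no `X-Scope-OrgID` header) are all assumptions, not part of this patch.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// Hypothetical smoke test: POST one OTLP/JSON log record to a local Loki.
func main() {
	payload := []byte(`{
	  "resourceLogs": [{
	    "resource": {"attributes": [{"key": "service.name", "value": {"stringValue": "test-service"}}]},
	    "scopeLogs": [{"logRecords": [{
	      "timeUnixNano": "1715000000000000000",
	      "severityText": "INFO",
	      "body": {"stringValue": "hello from OTLP"}
	    }]}]
	  }]
	}`)

	// The endpoint and content type here are assumptions for a local test setup.
	resp, err := http.Post("http://localhost:3100/otlp/v1/logs", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status) // expect a 2xx on success
}
```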
+ ## Query logs at a single point in time ```bash diff --git a/docs/sources/send-data/otel/_index.md b/docs/sources/send-data/otel/_index.md index 27d092a81c095..b7a67fcb14d06 100644 --- a/docs/sources/send-data/otel/_index.md +++ b/docs/sources/send-data/otel/_index.md @@ -30,7 +30,7 @@ You need to make the following changes to the [OpenTelemetry Collector config](h ```yaml exporters: otlphttp: - endpoint: http://:3100/otlp + endpoint: http://:3100/otlp/v1/logs ``` And enable it in `service.pipelines`: @@ -57,7 +57,7 @@ exporters: otlphttp: auth: authenticator: basicauth/otlp - endpoint: http://:3100/otlp + endpoint: http://:3100/otlp/v1/logs service: extensions: [basicauth/otlp] From efd8f5dc1b3bb3313de1ed6b26750d5bd5632b16 Mon Sep 17 00:00:00 2001 From: Salva Corts Date: Wed, 22 May 2024 10:43:32 +0200 Subject: [PATCH 46/47] refactor(blooms): Add queue to bloom planner and enqueue tasks (#13005) --- docs/sources/shared/configuration.md | 9 ++++ pkg/bloombuild/planner/config.go | 32 ++++++++++-- pkg/bloombuild/planner/metrics.go | 40 ++++++++++++++- pkg/bloombuild/planner/planner.go | 73 +++++++++++++++++++++------- pkg/bloombuild/planner/task.go | 7 +++ pkg/validation/limits.go | 6 +++ 6 files changed, 145 insertions(+), 22 deletions(-) diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index d30dce2f7775b..acf11102be511 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -351,6 +351,10 @@ bloom_build: # CLI flag: -bloom-build.planner.max-table-offset [max_table_offset: | default = 2] + # Maximum number of tasks to queue per tenant. + # CLI flag: -bloom-build.planner.max-tasks-per-tenant + [max_queued_tasks_per_tenant: | default = 30000] + builder: # Experimental: The bloom_gateway block configures the Loki bloom gateway @@ -3409,6 +3413,11 @@ shard_streams: # CLI flag: -bloom-build.split-keyspace-by [bloom_split_series_keyspace_by: | default = 256] +# Experimental. Maximum number of builders to use when building blooms. 0 allows +# unlimited builders. +# CLI flag: -bloom-build.max-builders +[bloom_build_max_builders: | default = 0] + # Experimental. Length of the n-grams created when computing blooms from log # lines. # CLI flag: -bloom-compactor.ngram-length diff --git a/pkg/bloombuild/planner/config.go b/pkg/bloombuild/planner/config.go index 47b01c0b286e0..aff25873b12f4 100644 --- a/pkg/bloombuild/planner/config.go +++ b/pkg/bloombuild/planner/config.go @@ -8,9 +8,10 @@ import ( // Config configures the bloom-planner component. type Config struct { - PlanningInterval time.Duration `yaml:"planning_interval"` - MinTableOffset int `yaml:"min_table_offset"` - MaxTableOffset int `yaml:"max_table_offset"` + PlanningInterval time.Duration `yaml:"planning_interval"` + MinTableOffset int `yaml:"min_table_offset"` + MaxTableOffset int `yaml:"max_table_offset"` + MaxQueuedTasksPerTenant int `yaml:"max_queued_tasks_per_tenant"` } // RegisterFlagsWithPrefix registers flags for the bloom-planner configuration. @@ -24,6 +25,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { // dynamically reloaded. // I'm doing it the simple way for now. f.IntVar(&cfg.MaxTableOffset, prefix+".max-table-offset", 2, "Oldest day-table offset (from today, inclusive) to compact. This can be used to lower cost by not trying to compact older data which doesn't change. 
This can be optimized by aligning it with the maximum `reject_old_samples_max_age` setting of any tenant.") + f.IntVar(&cfg.MaxQueuedTasksPerTenant, prefix+".max-tasks-per-tenant", 30000, "Maximum number of tasks to queue per tenant.") } func (cfg *Config) Validate() error { @@ -37,4 +39,28 @@ func (cfg *Config) Validate() error { type Limits interface { BloomCreationEnabled(tenantID string) bool BloomSplitSeriesKeyspaceBy(tenantID string) int + BloomBuildMaxBuilders(tenantID string) int +} + +type QueueLimits struct { + limits Limits +} + +func NewQueueLimits(limits Limits) *QueueLimits { + return &QueueLimits{limits: limits} +} + +// MaxConsumers is used to compute how many of the available builders are allowed to handle tasks for a given tenant. +// 0 is returned when neither limits are applied. 0 means all builders can be used. +func (c *QueueLimits) MaxConsumers(tenantID string, allConsumers int) int { + if c == nil || c.limits == nil { + return 0 + } + + maxBuilders := c.limits.BloomBuildMaxBuilders(tenantID) + if maxBuilders == 0 { + return 0 + } + + return min(allConsumers, maxBuilders) } diff --git a/pkg/bloombuild/planner/metrics.go b/pkg/bloombuild/planner/metrics.go index c0028237d9b1d..347af1926617b 100644 --- a/pkg/bloombuild/planner/metrics.go +++ b/pkg/bloombuild/planner/metrics.go @@ -1,8 +1,12 @@ package planner import ( + "time" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/grafana/loki/v3/pkg/queue" ) const ( @@ -16,6 +20,11 @@ const ( type Metrics struct { running prometheus.Gauge + // Extra Queue metrics + connectedBuilders prometheus.GaugeFunc + queueDuration prometheus.Histogram + inflightRequests prometheus.Summary + buildStarted prometheus.Counter buildCompleted *prometheus.CounterVec buildTime *prometheus.HistogramVec @@ -23,7 +32,10 @@ type Metrics struct { tenantsDiscovered prometheus.Counter } -func NewMetrics(r prometheus.Registerer) *Metrics { +func NewMetrics( + r prometheus.Registerer, + getConnectedBuilders func() float64, +) *Metrics { return &Metrics{ running: promauto.With(r).NewGauge(prometheus.GaugeOpts{ Namespace: metricsNamespace, @@ -31,6 +43,28 @@ func NewMetrics(r prometheus.Registerer) *Metrics { Name: "running", Help: "Value will be 1 if bloom planner is currently running on this instance", }), + connectedBuilders: promauto.With(r).NewGaugeFunc(prometheus.GaugeOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "connected_builders", + Help: "Number of builders currently connected to the planner.", + }, getConnectedBuilders), + queueDuration: promauto.With(r).NewHistogram(prometheus.HistogramOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "queue_duration_seconds", + Help: "Time spend by tasks in queue before getting picked up by a builder.", + Buckets: prometheus.DefBuckets, + }), + inflightRequests: promauto.With(r).NewSummary(prometheus.SummaryOpts{ + Namespace: metricsNamespace, + Subsystem: metricsSubsystem, + Name: "inflight_tasks", + Help: "Number of inflight tasks (either queued or processing) sampled at a regular interval. 
Quantile buckets keep track of inflight tasks over the last 60s.", + Objectives: map[float64]float64{0.5: 0.05, 0.75: 0.02, 0.8: 0.02, 0.9: 0.01, 0.95: 0.01, 0.99: 0.001}, + MaxAge: time.Minute, + AgeBuckets: 6, + }), buildStarted: promauto.With(r).NewCounter(prometheus.CounterOpts{ Namespace: metricsNamespace, @@ -60,3 +94,7 @@ func NewMetrics(r prometheus.Registerer) *Metrics { }), } } + +func NewQueueMetrics(r prometheus.Registerer) *queue.Metrics { + return queue.NewMetrics(r, metricsNamespace, metricsSubsystem) +} diff --git a/pkg/bloombuild/planner/planner.go b/pkg/bloombuild/planner/planner.go index 0be853a2f604a..9a5b9f6dc238e 100644 --- a/pkg/bloombuild/planner/planner.go +++ b/pkg/bloombuild/planner/planner.go @@ -12,16 +12,21 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/grafana/loki/v3/pkg/queue" "github.com/grafana/loki/v3/pkg/storage" v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/config" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" + "github.com/grafana/loki/v3/pkg/util" utillog "github.com/grafana/loki/v3/pkg/util/log" ) type Planner struct { services.Service + // Subservices manager. + subservices *services.Manager + subservicesWatcher *services.FailureWatcher cfg Config limits Limits @@ -30,6 +35,9 @@ type Planner struct { tsdbStore TSDBStore bloomStore bloomshipper.Store + tasksQueue *queue.RequestQueue + activeUsers *util.ActiveUsersCleanupService + metrics *Metrics logger log.Logger } @@ -51,15 +59,34 @@ func New( return nil, fmt.Errorf("error creating TSDB store: %w", err) } + // Queue to manage tasks + queueMetrics := NewQueueMetrics(r) + tasksQueue := queue.NewRequestQueue(cfg.MaxQueuedTasksPerTenant, 0, NewQueueLimits(limits), queueMetrics) + + // Clean metrics for inactive users: do not have added tasks to the queue in the last 1 hour + activeUsers := util.NewActiveUsersCleanupService(5*time.Minute, 1*time.Hour, func(user string) { + queueMetrics.Cleanup(user) + }) + p := &Planner{ - cfg: cfg, - limits: limits, - schemaCfg: schemaCfg, - tsdbStore: tsdbStore, - bloomStore: bloomStore, - metrics: NewMetrics(r), - logger: logger, + cfg: cfg, + limits: limits, + schemaCfg: schemaCfg, + tsdbStore: tsdbStore, + bloomStore: bloomStore, + tasksQueue: tasksQueue, + activeUsers: activeUsers, + metrics: NewMetrics(r, tasksQueue.GetConnectedConsumersMetric), + logger: logger, + } + + svcs := []services.Service{p.tasksQueue, p.activeUsers} + p.subservices, err = services.NewManager(svcs...) 
+ if err != nil { + return nil, fmt.Errorf("error creating subservices manager: %w", err) } + p.subservicesWatcher = services.NewFailureWatcher() + p.subservicesWatcher.WatchManager(p.subservices) p.Service = services.NewBasicService(p.starting, p.running, p.stopping) return p, nil @@ -91,6 +118,7 @@ func (p *Planner) running(ctx context.Context) error { return err case <-ticker.C: + level.Info(p.logger).Log("msg", "starting bloom build iteration") if err := p.runOne(ctx); err != nil { level.Error(p.logger).Log("msg", "bloom build iteration failed", "err", err) } @@ -109,44 +137,53 @@ func (p *Planner) runOne(ctx context.Context) error { }() p.metrics.buildStarted.Inc() - level.Info(p.logger).Log("msg", "running bloom build planning") tables := p.tables(time.Now()) level.Debug(p.logger).Log("msg", "loaded tables", "tables", tables.TotalDays()) work, err := p.loadWork(ctx, tables) if err != nil { - level.Error(p.logger).Log("msg", "error loading work", "err", err) return fmt.Errorf("error loading work: %w", err) } - // TODO: Enqueue instead of buffering here - // This is just a placeholder for now - var tasks []Task - + var totalTasks int for _, w := range work { + logger := log.With(p.logger, "tenant", w.tenant, "table", w.table.Addr(), "ownership", w.ownershipRange.String()) + gaps, err := p.findGapsForBounds(ctx, w.tenant, w.table, w.ownershipRange) if err != nil { - level.Error(p.logger).Log("msg", "error finding gaps", "err", err, "tenant", w.tenant, "table", w.table, "ownership", w.ownershipRange.String()) - return fmt.Errorf("error finding gaps for tenant (%s) in table (%s) for bounds (%s): %w", w.tenant, w.table, w.ownershipRange, err) + level.Error(logger).Log("msg", "error finding gaps", "err", err) + continue } + now := time.Now() for _, gap := range gaps { - tasks = append(tasks, Task{ + totalTasks++ + task := Task{ table: w.table.Addr(), tenant: w.tenant, OwnershipBounds: w.ownershipRange, tsdb: gap.tsdb, gaps: gap.gaps, - }) + + queueTime: now, + ctx: ctx, + } + + p.activeUsers.UpdateUserTimestamp(task.tenant, now) + if err := p.tasksQueue.Enqueue(task.tenant, nil, task, nil); err != nil { + level.Error(logger).Log("msg", "error enqueuing task", "err", err) + continue + } } } + level.Debug(p.logger).Log("msg", "planning completed", "tasks", totalTasks) + status = statusSuccess level.Info(p.logger).Log( "msg", "bloom build iteration completed", "duration", time.Since(start).Seconds(), - "tasks", len(tasks), ) return nil } diff --git a/pkg/bloombuild/planner/task.go b/pkg/bloombuild/planner/task.go index 80f730c4fb6dd..bff459fe17643 100644 --- a/pkg/bloombuild/planner/task.go +++ b/pkg/bloombuild/planner/task.go @@ -1,6 +1,9 @@ package planner import ( + "context" + "time" + v1 "github.com/grafana/loki/v3/pkg/storage/bloom/v1" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/bloomshipper" "github.com/grafana/loki/v3/pkg/storage/stores/shipper/indexshipper/tsdb" @@ -19,4 +22,8 @@ type Task struct { OwnershipBounds v1.FingerprintBounds tsdb tsdb.SingleTenantTSDBIdentifier gaps []GapWithBlocks + + // Tracking + queueTime time.Time + ctx context.Context } diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index b0660686f5c1b..148205b306c19 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -207,6 +207,7 @@ type Limits struct { BloomCreationEnabled bool `yaml:"bloom_creation_enabled" json:"bloom_creation_enabled" category:"experimental"` BloomSplitSeriesKeyspaceBy int `yaml:"bloom_split_series_keyspace_by" json:"bloom_split_series_keyspace_by" 
category:"experimental"` + BloomBuildMaxBuilders int `yaml:"bloom_build_max_builders" json:"bloom_build_max_builders" category:"experimental"` BloomNGramLength int `yaml:"bloom_ngram_length" json:"bloom_ngram_length" category:"experimental"` BloomNGramSkip int `yaml:"bloom_ngram_skip" json:"bloom_ngram_skip" category:"experimental"` @@ -385,6 +386,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&l.BloomCreationEnabled, "bloom-build.enable", false, "Experimental. Whether to create blooms for the tenant.") f.IntVar(&l.BloomSplitSeriesKeyspaceBy, "bloom-build.split-keyspace-by", 256, "Experimental. Number of splits to create for the series keyspace when building blooms. The series keyspace is split into this many parts to parallelize bloom creation.") + f.IntVar(&l.BloomBuildMaxBuilders, "bloom-build.max-builders", 0, "Experimental. Maximum number of builders to use when building blooms. 0 allows unlimited builders.") _ = l.BloomCompactorMaxBloomSize.Set(defaultBloomCompactorMaxBloomSize) f.Var(&l.BloomCompactorMaxBloomSize, "bloom-compactor.max-bloom-size", @@ -987,6 +989,10 @@ func (o *Overrides) BloomSplitSeriesKeyspaceBy(userID string) int { return o.getOverridesForUser(userID).BloomSplitSeriesKeyspaceBy } +func (o *Overrides) BloomBuildMaxBuilders(userID string) int { + return o.getOverridesForUser(userID).BloomBuildMaxBuilders +} + func (o *Overrides) BloomNGramLength(userID string) int { return o.getOverridesForUser(userID).BloomNGramLength } From 1948899999107e7f27f4b9faace64942abcdb41f Mon Sep 17 00:00:00 2001 From: Quentin Bisson Date: Wed, 22 May 2024 15:16:29 +0200 Subject: [PATCH 47/47] fix: Mixins - Add missing log datasource on loki-deletion (#13011) --- .../dashboards/loki-deletion.json | 10 ++++++++++ .../loki-mixin-compiled/dashboards/loki-deletion.json | 10 ++++++++++ .../loki-mixin/dashboards/loki-deletion.libsonnet | 1 + 3 files changed, 21 insertions(+) diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json b/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json index 8ce9529906aa4..0718507d941fd 100644 --- a/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json +++ b/production/loki-mixin-compiled-ssd/dashboards/loki-deletion.json @@ -701,6 +701,16 @@ "tagsQuery": "", "type": "query", "useTags": false + }, + { + "hide": 0, + "label": null, + "name": "loki_datasource", + "options": [ ], + "query": "loki", + "refresh": 1, + "regex": "", + "type": "datasource" } ] }, diff --git a/production/loki-mixin-compiled/dashboards/loki-deletion.json b/production/loki-mixin-compiled/dashboards/loki-deletion.json index 2db2b7cb36586..7b048f729e2b3 100644 --- a/production/loki-mixin-compiled/dashboards/loki-deletion.json +++ b/production/loki-mixin-compiled/dashboards/loki-deletion.json @@ -701,6 +701,16 @@ "tagsQuery": "", "type": "query", "useTags": false + }, + { + "hide": 0, + "label": null, + "name": "loki_datasource", + "options": [ ], + "query": "loki", + "refresh": 1, + "regex": "", + "type": "datasource" } ] }, diff --git a/production/loki-mixin/dashboards/loki-deletion.libsonnet b/production/loki-mixin/dashboards/loki-deletion.libsonnet index 0ddb39fea6953..a1c50ecfa6911 100644 --- a/production/loki-mixin/dashboards/loki-deletion.libsonnet +++ b/production/loki-mixin/dashboards/loki-deletion.libsonnet @@ -12,6 +12,7 @@ local utils = import 'mixin-utils/utils.libsonnet'; .addCluster() .addNamespace() .addTag() + .addLog() .addRow( ($.row('Headlines') + {