From 7c0296c2665a7a9e381f2cdfc044bd6c816c84a8 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Fri, 18 Aug 2023 14:54:34 -0600 Subject: [PATCH 1/8] Start name change for cpu.utilization --- .../kubeletstatsreceiver/documentation.md | 24 +++ .../internal/kubelet/cpu.go | 1 + .../internal/metadata/generated_config.go | 12 ++ .../metadata/generated_config_test.go | 6 + .../internal/metadata/generated_metrics.go | 180 ++++++++++++++++++ .../metadata/generated_metrics_test.go | 57 ++++++ .../internal/metadata/metrics.go | 4 + .../internal/metadata/testdata/config.yaml | 12 ++ receiver/kubeletstatsreceiver/metadata.yaml | 27 +++ 9 files changed, 323 insertions(+) diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md index 2b00e234a653..5f987bec64a5 100644 --- a/receiver/kubeletstatsreceiver/documentation.md +++ b/receiver/kubeletstatsreceiver/documentation.md @@ -386,6 +386,14 @@ metrics: enabled: true ``` +### container.cpu.usage + +Container CPU usage + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + ### container.uptime The time since the container started @@ -426,6 +434,14 @@ Container memory utilization as a ratio of the container's requests | ---- | ----------- | ---------- | | 1 | Gauge | Double | +### k8s.node.cpu.usage + +Node CPU usage + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + ### k8s.node.uptime The time since the node started @@ -434,6 +450,14 @@ The time since the node started | ---- | ----------- | ---------- | ----------------------- | --------- | | s | Sum | Int | Cumulative | true | +### k8s.pod.cpu.usage + +Pod CPU usage + +| Unit | Metric Type | Value Type | +| ---- | ----------- | ---------- | +| 1 | Gauge | Double | + ### k8s.pod.cpu_limit_utilization Pod cpu utilization as a ratio of the pod's total container limits. If any container is missing a limit the metric is not emitted. diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go b/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go index 00ec2e00451f..15507649147f 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go @@ -24,6 +24,7 @@ func addCPUUsageMetric(mb *metadata.MetricsBuilder, cpuMetrics metadata.CPUMetri } value := float64(*s.UsageNanoCores) / 1_000_000_000 cpuMetrics.Utilization(mb, currentTime, value) + cpuMetrics.Usage(mb, currentTime, value) if r.cpuLimit > 0 { cpuMetrics.LimitUtilization(mb, currentTime, value/r.cpuLimit) diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go index 1e8fa9f76d4f..3d4ceebf2075 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_config.go @@ -26,6 +26,7 @@ func (ms *MetricConfig) Unmarshal(parser *confmap.Conf) error { // MetricsConfig provides config for kubeletstats metrics. 
type MetricsConfig struct { ContainerCPUTime MetricConfig `mapstructure:"container.cpu.time"` + ContainerCPUUsage MetricConfig `mapstructure:"container.cpu.usage"` ContainerCPUUtilization MetricConfig `mapstructure:"container.cpu.utilization"` ContainerFilesystemAvailable MetricConfig `mapstructure:"container.filesystem.available"` ContainerFilesystemCapacity MetricConfig `mapstructure:"container.filesystem.capacity"` @@ -42,6 +43,7 @@ type MetricsConfig struct { K8sContainerMemoryLimitUtilization MetricConfig `mapstructure:"k8s.container.memory_limit_utilization"` K8sContainerMemoryRequestUtilization MetricConfig `mapstructure:"k8s.container.memory_request_utilization"` K8sNodeCPUTime MetricConfig `mapstructure:"k8s.node.cpu.time"` + K8sNodeCPUUsage MetricConfig `mapstructure:"k8s.node.cpu.usage"` K8sNodeCPUUtilization MetricConfig `mapstructure:"k8s.node.cpu.utilization"` K8sNodeFilesystemAvailable MetricConfig `mapstructure:"k8s.node.filesystem.available"` K8sNodeFilesystemCapacity MetricConfig `mapstructure:"k8s.node.filesystem.capacity"` @@ -56,6 +58,7 @@ type MetricsConfig struct { K8sNodeNetworkIo MetricConfig `mapstructure:"k8s.node.network.io"` K8sNodeUptime MetricConfig `mapstructure:"k8s.node.uptime"` K8sPodCPUTime MetricConfig `mapstructure:"k8s.pod.cpu.time"` + K8sPodCPUUsage MetricConfig `mapstructure:"k8s.pod.cpu.usage"` K8sPodCPUUtilization MetricConfig `mapstructure:"k8s.pod.cpu.utilization"` K8sPodCPULimitUtilization MetricConfig `mapstructure:"k8s.pod.cpu_limit_utilization"` K8sPodCPURequestUtilization MetricConfig `mapstructure:"k8s.pod.cpu_request_utilization"` @@ -85,6 +88,9 @@ func DefaultMetricsConfig() MetricsConfig { ContainerCPUTime: MetricConfig{ Enabled: true, }, + ContainerCPUUsage: MetricConfig{ + Enabled: false, + }, ContainerCPUUtilization: MetricConfig{ Enabled: true, }, @@ -133,6 +139,9 @@ func DefaultMetricsConfig() MetricsConfig { K8sNodeCPUTime: MetricConfig{ Enabled: true, }, + K8sNodeCPUUsage: MetricConfig{ + Enabled: false, + }, K8sNodeCPUUtilization: MetricConfig{ Enabled: true, }, @@ -175,6 +184,9 @@ func DefaultMetricsConfig() MetricsConfig { K8sPodCPUTime: MetricConfig{ Enabled: true, }, + K8sPodCPUUsage: MetricConfig{ + Enabled: false, + }, K8sPodCPUUtilization: MetricConfig{ Enabled: true, }, diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go index 7b1259eff5ec..91ecb174c7a1 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_config_test.go @@ -27,6 +27,7 @@ func TestMetricsBuilderConfig(t *testing.T) { want: MetricsBuilderConfig{ Metrics: MetricsConfig{ ContainerCPUTime: MetricConfig{Enabled: true}, + ContainerCPUUsage: MetricConfig{Enabled: true}, ContainerCPUUtilization: MetricConfig{Enabled: true}, ContainerFilesystemAvailable: MetricConfig{Enabled: true}, ContainerFilesystemCapacity: MetricConfig{Enabled: true}, @@ -43,6 +44,7 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sContainerMemoryLimitUtilization: MetricConfig{Enabled: true}, K8sContainerMemoryRequestUtilization: MetricConfig{Enabled: true}, K8sNodeCPUTime: MetricConfig{Enabled: true}, + K8sNodeCPUUsage: MetricConfig{Enabled: true}, K8sNodeCPUUtilization: MetricConfig{Enabled: true}, K8sNodeFilesystemAvailable: MetricConfig{Enabled: true}, K8sNodeFilesystemCapacity: MetricConfig{Enabled: true}, @@ -57,6 +59,7 @@ func TestMetricsBuilderConfig(t *testing.T) { 
K8sNodeNetworkIo: MetricConfig{Enabled: true}, K8sNodeUptime: MetricConfig{Enabled: true}, K8sPodCPUTime: MetricConfig{Enabled: true}, + K8sPodCPUUsage: MetricConfig{Enabled: true}, K8sPodCPUUtilization: MetricConfig{Enabled: true}, K8sPodCPULimitUtilization: MetricConfig{Enabled: true}, K8sPodCPURequestUtilization: MetricConfig{Enabled: true}, @@ -104,6 +107,7 @@ func TestMetricsBuilderConfig(t *testing.T) { want: MetricsBuilderConfig{ Metrics: MetricsConfig{ ContainerCPUTime: MetricConfig{Enabled: false}, + ContainerCPUUsage: MetricConfig{Enabled: false}, ContainerCPUUtilization: MetricConfig{Enabled: false}, ContainerFilesystemAvailable: MetricConfig{Enabled: false}, ContainerFilesystemCapacity: MetricConfig{Enabled: false}, @@ -120,6 +124,7 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sContainerMemoryLimitUtilization: MetricConfig{Enabled: false}, K8sContainerMemoryRequestUtilization: MetricConfig{Enabled: false}, K8sNodeCPUTime: MetricConfig{Enabled: false}, + K8sNodeCPUUsage: MetricConfig{Enabled: false}, K8sNodeCPUUtilization: MetricConfig{Enabled: false}, K8sNodeFilesystemAvailable: MetricConfig{Enabled: false}, K8sNodeFilesystemCapacity: MetricConfig{Enabled: false}, @@ -134,6 +139,7 @@ func TestMetricsBuilderConfig(t *testing.T) { K8sNodeNetworkIo: MetricConfig{Enabled: false}, K8sNodeUptime: MetricConfig{Enabled: false}, K8sPodCPUTime: MetricConfig{Enabled: false}, + K8sPodCPUUsage: MetricConfig{Enabled: false}, K8sPodCPUUtilization: MetricConfig{Enabled: false}, K8sPodCPULimitUtilization: MetricConfig{Enabled: false}, K8sPodCPURequestUtilization: MetricConfig{Enabled: false}, diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go index 03f94dc800bf..50251d5b8a2b 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go @@ -88,6 +88,55 @@ func newMetricContainerCPUTime(cfg MetricConfig) metricContainerCPUTime { return m } +type metricContainerCPUUsage struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills container.cpu.usage metric with initial data. +func (m *metricContainerCPUUsage) init() { + m.data.SetName("container.cpu.usage") + m.data.SetDescription("Container CPU usage") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricContainerCPUUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricContainerCPUUsage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
+func (m *metricContainerCPUUsage) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricContainerCPUUsage(cfg MetricConfig) metricContainerCPUUsage { + m := metricContainerCPUUsage{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricContainerCPUUtilization struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -876,6 +925,55 @@ func newMetricK8sNodeCPUTime(cfg MetricConfig) metricK8sNodeCPUTime { return m } +type metricK8sNodeCPUUsage struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.node.cpu.usage metric with initial data. +func (m *metricK8sNodeCPUUsage) init() { + m.data.SetName("k8s.node.cpu.usage") + m.data.SetDescription("Node CPU usage") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sNodeCPUUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. +func (m *metricK8sNodeCPUUsage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sNodeCPUUsage) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sNodeCPUUsage(cfg MetricConfig) metricK8sNodeCPUUsage { + m := metricK8sNodeCPUUsage{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricK8sNodeCPUUtilization struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -1576,6 +1674,55 @@ func newMetricK8sPodCPUTime(cfg MetricConfig) metricK8sPodCPUTime { return m } +type metricK8sPodCPUUsage struct { + data pmetric.Metric // data buffer for generated metric. + config MetricConfig // metric config provided by user. + capacity int // max observed number of data points added to the metric. +} + +// init fills k8s.pod.cpu.usage metric with initial data. +func (m *metricK8sPodCPUUsage) init() { + m.data.SetName("k8s.pod.cpu.usage") + m.data.SetDescription("Pod CPU usage") + m.data.SetUnit("1") + m.data.SetEmptyGauge() +} + +func (m *metricK8sPodCPUUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { + if !m.config.Enabled { + return + } + dp := m.data.Gauge().DataPoints().AppendEmpty() + dp.SetStartTimestamp(start) + dp.SetTimestamp(ts) + dp.SetDoubleValue(val) +} + +// updateCapacity saves max length of data point slices that will be used for the slice capacity. 
+func (m *metricK8sPodCPUUsage) updateCapacity() { + if m.data.Gauge().DataPoints().Len() > m.capacity { + m.capacity = m.data.Gauge().DataPoints().Len() + } +} + +// emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. +func (m *metricK8sPodCPUUsage) emit(metrics pmetric.MetricSlice) { + if m.config.Enabled && m.data.Gauge().DataPoints().Len() > 0 { + m.updateCapacity() + m.data.MoveTo(metrics.AppendEmpty()) + m.init() + } +} + +func newMetricK8sPodCPUUsage(cfg MetricConfig) metricK8sPodCPUUsage { + m := metricK8sPodCPUUsage{config: cfg} + if cfg.Enabled { + m.data = pmetric.NewMetric() + m.init() + } + return m +} + type metricK8sPodCPUUtilization struct { data pmetric.Metric // data buffer for generated metric. config MetricConfig // metric config provided by user. @@ -2675,6 +2822,7 @@ type MetricsBuilder struct { metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. buildInfo component.BuildInfo // contains version information. metricContainerCPUTime metricContainerCPUTime + metricContainerCPUUsage metricContainerCPUUsage metricContainerCPUUtilization metricContainerCPUUtilization metricContainerFilesystemAvailable metricContainerFilesystemAvailable metricContainerFilesystemCapacity metricContainerFilesystemCapacity @@ -2691,6 +2839,7 @@ type MetricsBuilder struct { metricK8sContainerMemoryLimitUtilization metricK8sContainerMemoryLimitUtilization metricK8sContainerMemoryRequestUtilization metricK8sContainerMemoryRequestUtilization metricK8sNodeCPUTime metricK8sNodeCPUTime + metricK8sNodeCPUUsage metricK8sNodeCPUUsage metricK8sNodeCPUUtilization metricK8sNodeCPUUtilization metricK8sNodeFilesystemAvailable metricK8sNodeFilesystemAvailable metricK8sNodeFilesystemCapacity metricK8sNodeFilesystemCapacity @@ -2705,6 +2854,7 @@ type MetricsBuilder struct { metricK8sNodeNetworkIo metricK8sNodeNetworkIo metricK8sNodeUptime metricK8sNodeUptime metricK8sPodCPUTime metricK8sPodCPUTime + metricK8sPodCPUUsage metricK8sPodCPUUsage metricK8sPodCPUUtilization metricK8sPodCPUUtilization metricK8sPodCPULimitUtilization metricK8sPodCPULimitUtilization metricK8sPodCPURequestUtilization metricK8sPodCPURequestUtilization @@ -2740,12 +2890,22 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { } func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { + if !mbc.Metrics.ContainerCPUUtilization.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `container.cpu.utilization`: WARNING: This metric will be disabled in a future release. Use metric container.cpu.usage instead.") + } + if !mbc.Metrics.K8sNodeCPUUtilization.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `k8s.node.cpu.utilization`: WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead.") + } + if !mbc.Metrics.K8sPodCPUUtilization.enabledSetByUser { + settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `k8s.pod.cpu.utilization`: This metric will be disabled in a future release. 
Use metric k8s.pod.cpu.usage instead.") + } mb := &MetricsBuilder{ config: mbc, startTime: pcommon.NewTimestampFromTime(time.Now()), metricsBuffer: pmetric.NewMetrics(), buildInfo: settings.BuildInfo, metricContainerCPUTime: newMetricContainerCPUTime(mbc.Metrics.ContainerCPUTime), + metricContainerCPUUsage: newMetricContainerCPUUsage(mbc.Metrics.ContainerCPUUsage), metricContainerCPUUtilization: newMetricContainerCPUUtilization(mbc.Metrics.ContainerCPUUtilization), metricContainerFilesystemAvailable: newMetricContainerFilesystemAvailable(mbc.Metrics.ContainerFilesystemAvailable), metricContainerFilesystemCapacity: newMetricContainerFilesystemCapacity(mbc.Metrics.ContainerFilesystemCapacity), @@ -2762,6 +2922,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricK8sContainerMemoryLimitUtilization: newMetricK8sContainerMemoryLimitUtilization(mbc.Metrics.K8sContainerMemoryLimitUtilization), metricK8sContainerMemoryRequestUtilization: newMetricK8sContainerMemoryRequestUtilization(mbc.Metrics.K8sContainerMemoryRequestUtilization), metricK8sNodeCPUTime: newMetricK8sNodeCPUTime(mbc.Metrics.K8sNodeCPUTime), + metricK8sNodeCPUUsage: newMetricK8sNodeCPUUsage(mbc.Metrics.K8sNodeCPUUsage), metricK8sNodeCPUUtilization: newMetricK8sNodeCPUUtilization(mbc.Metrics.K8sNodeCPUUtilization), metricK8sNodeFilesystemAvailable: newMetricK8sNodeFilesystemAvailable(mbc.Metrics.K8sNodeFilesystemAvailable), metricK8sNodeFilesystemCapacity: newMetricK8sNodeFilesystemCapacity(mbc.Metrics.K8sNodeFilesystemCapacity), @@ -2776,6 +2937,7 @@ func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSetting metricK8sNodeNetworkIo: newMetricK8sNodeNetworkIo(mbc.Metrics.K8sNodeNetworkIo), metricK8sNodeUptime: newMetricK8sNodeUptime(mbc.Metrics.K8sNodeUptime), metricK8sPodCPUTime: newMetricK8sPodCPUTime(mbc.Metrics.K8sPodCPUTime), + metricK8sPodCPUUsage: newMetricK8sPodCPUUsage(mbc.Metrics.K8sPodCPUUsage), metricK8sPodCPUUtilization: newMetricK8sPodCPUUtilization(mbc.Metrics.K8sPodCPUUtilization), metricK8sPodCPULimitUtilization: newMetricK8sPodCPULimitUtilization(mbc.Metrics.K8sPodCPULimitUtilization), metricK8sPodCPURequestUtilization: newMetricK8sPodCPURequestUtilization(mbc.Metrics.K8sPodCPURequestUtilization), @@ -2860,6 +3022,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { ils.Scope().SetVersion(mb.buildInfo.Version) ils.Metrics().EnsureCapacity(mb.metricsCapacity) mb.metricContainerCPUTime.emit(ils.Metrics()) + mb.metricContainerCPUUsage.emit(ils.Metrics()) mb.metricContainerCPUUtilization.emit(ils.Metrics()) mb.metricContainerFilesystemAvailable.emit(ils.Metrics()) mb.metricContainerFilesystemCapacity.emit(ils.Metrics()) @@ -2876,6 +3039,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricK8sContainerMemoryLimitUtilization.emit(ils.Metrics()) mb.metricK8sContainerMemoryRequestUtilization.emit(ils.Metrics()) mb.metricK8sNodeCPUTime.emit(ils.Metrics()) + mb.metricK8sNodeCPUUsage.emit(ils.Metrics()) mb.metricK8sNodeCPUUtilization.emit(ils.Metrics()) mb.metricK8sNodeFilesystemAvailable.emit(ils.Metrics()) mb.metricK8sNodeFilesystemCapacity.emit(ils.Metrics()) @@ -2890,6 +3054,7 @@ func (mb *MetricsBuilder) EmitForResource(rmo ...ResourceMetricsOption) { mb.metricK8sNodeNetworkIo.emit(ils.Metrics()) mb.metricK8sNodeUptime.emit(ils.Metrics()) mb.metricK8sPodCPUTime.emit(ils.Metrics()) + mb.metricK8sPodCPUUsage.emit(ils.Metrics()) mb.metricK8sPodCPUUtilization.emit(ils.Metrics()) 
mb.metricK8sPodCPULimitUtilization.emit(ils.Metrics()) mb.metricK8sPodCPURequestUtilization.emit(ils.Metrics()) @@ -2937,6 +3102,11 @@ func (mb *MetricsBuilder) RecordContainerCPUTimeDataPoint(ts pcommon.Timestamp, mb.metricContainerCPUTime.recordDataPoint(mb.startTime, ts, val) } +// RecordContainerCPUUsageDataPoint adds a data point to container.cpu.usage metric. +func (mb *MetricsBuilder) RecordContainerCPUUsageDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricContainerCPUUsage.recordDataPoint(mb.startTime, ts, val) +} + // RecordContainerCPUUtilizationDataPoint adds a data point to container.cpu.utilization metric. func (mb *MetricsBuilder) RecordContainerCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) { mb.metricContainerCPUUtilization.recordDataPoint(mb.startTime, ts, val) @@ -3017,6 +3187,11 @@ func (mb *MetricsBuilder) RecordK8sNodeCPUTimeDataPoint(ts pcommon.Timestamp, va mb.metricK8sNodeCPUTime.recordDataPoint(mb.startTime, ts, val) } +// RecordK8sNodeCPUUsageDataPoint adds a data point to k8s.node.cpu.usage metric. +func (mb *MetricsBuilder) RecordK8sNodeCPUUsageDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricK8sNodeCPUUsage.recordDataPoint(mb.startTime, ts, val) +} + // RecordK8sNodeCPUUtilizationDataPoint adds a data point to k8s.node.cpu.utilization metric. func (mb *MetricsBuilder) RecordK8sNodeCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) { mb.metricK8sNodeCPUUtilization.recordDataPoint(mb.startTime, ts, val) @@ -3087,6 +3262,11 @@ func (mb *MetricsBuilder) RecordK8sPodCPUTimeDataPoint(ts pcommon.Timestamp, val mb.metricK8sPodCPUTime.recordDataPoint(mb.startTime, ts, val) } +// RecordK8sPodCPUUsageDataPoint adds a data point to k8s.pod.cpu.usage metric. +func (mb *MetricsBuilder) RecordK8sPodCPUUsageDataPoint(ts pcommon.Timestamp, val float64) { + mb.metricK8sPodCPUUsage.recordDataPoint(mb.startTime, ts, val) +} + // RecordK8sPodCPUUtilizationDataPoint adds a data point to k8s.pod.cpu.utilization metric. func (mb *MetricsBuilder) RecordK8sPodCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64) { mb.metricK8sPodCPUUtilization.recordDataPoint(mb.startTime, ts, val) diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go index 779fe477bf9c..00f349a92eeb 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go @@ -49,6 +49,18 @@ func TestMetricsBuilder(t *testing.T) { mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `container.cpu.utilization`: WARNING: This metric will be disabled in a future release. Use metric container.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `k8s.node.cpu.utilization`: WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } + if test.configSet == testSetDefault { + assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `k8s.pod.cpu.utilization`: This metric will be disabled in a future release. 
Use metric k8s.pod.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message) + expectedWarnings++ + } assert.Equal(t, expectedWarnings, observedLogs.Len()) @@ -59,6 +71,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordContainerCPUTimeDataPoint(ts, 1) + allMetricsCount++ + mb.RecordContainerCPUUsageDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordContainerCPUUtilizationDataPoint(ts, 1) @@ -118,6 +133,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordK8sNodeCPUTimeDataPoint(ts, 1) + allMetricsCount++ + mb.RecordK8sNodeCPUUsageDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordK8sNodeCPUUtilizationDataPoint(ts, 1) @@ -173,6 +191,9 @@ func TestMetricsBuilder(t *testing.T) { allMetricsCount++ mb.RecordK8sPodCPUTimeDataPoint(ts, 1) + allMetricsCount++ + mb.RecordK8sPodCPUUsageDataPoint(ts, 1) + defaultMetricsCount++ allMetricsCount++ mb.RecordK8sPodCPUUtilizationDataPoint(ts, 1) @@ -308,6 +329,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) assert.Equal(t, float64(1), dp.DoubleValue()) + case "container.cpu.usage": + assert.False(t, validatedMetrics["container.cpu.usage"], "Found a duplicate in the metrics slice: container.cpu.usage") + validatedMetrics["container.cpu.usage"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Container CPU usage", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) case "container.cpu.utilization": assert.False(t, validatedMetrics["container.cpu.utilization"], "Found a duplicate in the metrics slice: container.cpu.utilization") validatedMetrics["container.cpu.utilization"] = true @@ -504,6 +537,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) assert.Equal(t, float64(1), dp.DoubleValue()) + case "k8s.node.cpu.usage": + assert.False(t, validatedMetrics["k8s.node.cpu.usage"], "Found a duplicate in the metrics slice: k8s.node.cpu.usage") + validatedMetrics["k8s.node.cpu.usage"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Node CPU usage", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) case "k8s.node.cpu.utilization": assert.False(t, validatedMetrics["k8s.node.cpu.utilization"], "Found a duplicate in the metrics slice: k8s.node.cpu.utilization") validatedMetrics["k8s.node.cpu.utilization"] = true @@ -692,6 +737,18 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, ts, dp.Timestamp()) assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) assert.Equal(t, float64(1), dp.DoubleValue()) + case "k8s.pod.cpu.usage": + assert.False(t, validatedMetrics["k8s.pod.cpu.usage"], "Found a duplicate in the metrics slice: k8s.pod.cpu.usage") + 
validatedMetrics["k8s.pod.cpu.usage"] = true + assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) + assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) + assert.Equal(t, "Pod CPU usage", ms.At(i).Description()) + assert.Equal(t, "1", ms.At(i).Unit()) + dp := ms.At(i).Gauge().DataPoints().At(0) + assert.Equal(t, start, dp.StartTimestamp()) + assert.Equal(t, ts, dp.Timestamp()) + assert.Equal(t, pmetric.NumberDataPointValueTypeDouble, dp.ValueType()) + assert.Equal(t, float64(1), dp.DoubleValue()) case "k8s.pod.cpu.utilization": assert.False(t, validatedMetrics["k8s.pod.cpu.utilization"], "Found a duplicate in the metrics slice: k8s.pod.cpu.utilization") validatedMetrics["k8s.pod.cpu.utilization"] = true diff --git a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go index 7aef32500de8..38b80f9d9f2c 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/metrics.go @@ -20,6 +20,7 @@ type MetricsBuilders struct { type CPUMetrics struct { Time RecordDoubleDataPointFunc + Usage RecordDoubleDataPointFunc Utilization RecordDoubleDataPointFunc LimitUtilization RecordDoubleDataPointFunc RequestUtilization RecordDoubleDataPointFunc @@ -27,11 +28,13 @@ type CPUMetrics struct { var NodeCPUMetrics = CPUMetrics{ Time: (*MetricsBuilder).RecordK8sNodeCPUTimeDataPoint, + Usage: (*MetricsBuilder).RecordK8sNodeCPUUsageDataPoint, Utilization: (*MetricsBuilder).RecordK8sNodeCPUUtilizationDataPoint, } var PodCPUMetrics = CPUMetrics{ Time: (*MetricsBuilder).RecordK8sPodCPUTimeDataPoint, + Usage: (*MetricsBuilder).RecordK8sPodCPUUsageDataPoint, Utilization: (*MetricsBuilder).RecordK8sPodCPUUtilizationDataPoint, LimitUtilization: (*MetricsBuilder).RecordK8sPodCPULimitUtilizationDataPoint, RequestUtilization: (*MetricsBuilder).RecordK8sPodCPURequestUtilizationDataPoint, @@ -39,6 +42,7 @@ var PodCPUMetrics = CPUMetrics{ var ContainerCPUMetrics = CPUMetrics{ Time: (*MetricsBuilder).RecordContainerCPUTimeDataPoint, + Usage: (*MetricsBuilder).RecordContainerCPUUsageDataPoint, Utilization: (*MetricsBuilder).RecordContainerCPUUtilizationDataPoint, LimitUtilization: (*MetricsBuilder).RecordK8sContainerCPULimitUtilizationDataPoint, RequestUtilization: (*MetricsBuilder).RecordK8sContainerCPURequestUtilizationDataPoint, diff --git a/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml b/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml index c22b9fe2daf8..670fea221c4d 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml +++ b/receiver/kubeletstatsreceiver/internal/metadata/testdata/config.yaml @@ -3,6 +3,8 @@ all_set: metrics: container.cpu.time: enabled: true + container.cpu.usage: + enabled: true container.cpu.utilization: enabled: true container.filesystem.available: @@ -35,6 +37,8 @@ all_set: enabled: true k8s.node.cpu.time: enabled: true + k8s.node.cpu.usage: + enabled: true k8s.node.cpu.utilization: enabled: true k8s.node.filesystem.available: @@ -63,6 +67,8 @@ all_set: enabled: true k8s.pod.cpu.time: enabled: true + k8s.pod.cpu.usage: + enabled: true k8s.pod.cpu.utilization: enabled: true k8s.pod.cpu_limit_utilization: @@ -142,6 +148,8 @@ none_set: metrics: container.cpu.time: enabled: false + container.cpu.usage: + enabled: false container.cpu.utilization: enabled: false container.filesystem.available: @@ -174,6 +182,8 @@ none_set: enabled: false k8s.node.cpu.time: enabled: false + k8s.node.cpu.usage: + 
enabled: false k8s.node.cpu.utilization: enabled: false k8s.node.filesystem.available: @@ -202,6 +212,8 @@ none_set: enabled: false k8s.pod.cpu.time: enabled: false + k8s.pod.cpu.usage: + enabled: false k8s.pod.cpu.utilization: enabled: false k8s.pod.cpu_limit_utilization: diff --git a/receiver/kubeletstatsreceiver/metadata.yaml b/receiver/kubeletstatsreceiver/metadata.yaml index e384819097e7..b0534b399e00 100644 --- a/receiver/kubeletstatsreceiver/metadata.yaml +++ b/receiver/kubeletstatsreceiver/metadata.yaml @@ -81,9 +81,18 @@ attributes: enum: [receive, transmit] metrics: + k8s.node.cpu.usage: + enabled: false + description: "Node CPU usage" + unit: 1 + gauge: + value_type: double + attributes: [] k8s.node.cpu.utilization: enabled: true description: "Node CPU utilization" + warnings: + if_enabled_not_set: "WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead." unit: 1 gauge: value_type: double @@ -187,9 +196,18 @@ metrics: monotonic: true aggregation_temporality: cumulative attributes: [] + k8s.pod.cpu.usage: + enabled: false + description: "Pod CPU usage" + unit: 1 + gauge: + value_type: double + attributes: [ ] k8s.pod.cpu.utilization: enabled: true description: "Pod CPU utilization" + warnings: + if_enabled_not_set: "This metric will be disabled in a future release. Use metric k8s.pod.cpu.usage instead." unit: 1 gauge: value_type: double @@ -321,9 +339,18 @@ metrics: monotonic: true aggregation_temporality: cumulative attributes: [] + container.cpu.usage: + enabled: false + description: "Container CPU usage" + unit: 1 + gauge: + value_type: double + attributes: [ ] container.cpu.utilization: enabled: true description: "Container CPU utilization" + warnings: + if_enabled_not_set: "WARNING: This metric will be disabled in a future release. Use metric container.cpu.usage instead." unit: 1 gauge: value_type: double From 2dfdd5f00864ca205e0d97e3cbd231b31af978d4 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Mon, 21 Aug 2023 08:45:00 -0600 Subject: [PATCH 2/8] changelog --- ...tstats-start-changing-cpu-utilization.yaml | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100755 .chloggen/kubeletstats-start-changing-cpu-utilization.yaml diff --git a/.chloggen/kubeletstats-start-changing-cpu-utilization.yaml b/.chloggen/kubeletstats-start-changing-cpu-utilization.yaml new file mode 100755 index 000000000000..c96eb873164d --- /dev/null +++ b/.chloggen/kubeletstats-start-changing-cpu-utilization.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: kubeletstatsreceiver + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add new `*.cpu.usage` metrics. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [25901] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. 
+subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] From f9306b8b8b76f96b9a0fe6e3de23982e783b64c4 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Mon, 23 Oct 2023 08:47:35 -0600 Subject: [PATCH 3/8] Update units --- receiver/kubeletstatsreceiver/documentation.md | 6 +++--- .../internal/metadata/generated_metrics.go | 6 +++--- .../internal/metadata/generated_metrics_test.go | 6 +++--- receiver/kubeletstatsreceiver/metadata.yaml | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md index 5f987bec64a5..e8513301469f 100644 --- a/receiver/kubeletstatsreceiver/documentation.md +++ b/receiver/kubeletstatsreceiver/documentation.md @@ -392,7 +392,7 @@ Container CPU usage | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Double | +| nanoseconds | Gauge | Double | ### container.uptime @@ -440,7 +440,7 @@ Node CPU usage | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Double | +| nanoseconds | Gauge | Double | ### k8s.node.uptime @@ -456,7 +456,7 @@ Pod CPU usage | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| 1 | Gauge | Double | +| nanoseconds | Gauge | Double | ### k8s.pod.cpu_limit_utilization diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go index 50251d5b8a2b..e57a04769569 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go @@ -98,7 +98,7 @@ type metricContainerCPUUsage struct { func (m *metricContainerCPUUsage) init() { m.data.SetName("container.cpu.usage") m.data.SetDescription("Container CPU usage") - m.data.SetUnit("1") + m.data.SetUnit("nanoseconds") m.data.SetEmptyGauge() } @@ -935,7 +935,7 @@ type metricK8sNodeCPUUsage struct { func (m *metricK8sNodeCPUUsage) init() { m.data.SetName("k8s.node.cpu.usage") m.data.SetDescription("Node CPU usage") - m.data.SetUnit("1") + m.data.SetUnit("nanoseconds") m.data.SetEmptyGauge() } @@ -1684,7 +1684,7 @@ type metricK8sPodCPUUsage struct { func (m *metricK8sPodCPUUsage) init() { m.data.SetName("k8s.pod.cpu.usage") m.data.SetDescription("Pod CPU usage") - m.data.SetUnit("1") + m.data.SetUnit("nanoseconds") m.data.SetEmptyGauge() } diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go index 00f349a92eeb..0b554eb54f55 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go @@ -335,7 +335,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Container CPU usage", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, 
"nanoseconds", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -543,7 +543,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Node CPU usage", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "nanoseconds", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -743,7 +743,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Pod CPU usage", ms.At(i).Description()) - assert.Equal(t, "1", ms.At(i).Unit()) + assert.Equal(t, "nanoseconds", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) diff --git a/receiver/kubeletstatsreceiver/metadata.yaml b/receiver/kubeletstatsreceiver/metadata.yaml index b0534b399e00..88f3c329078d 100644 --- a/receiver/kubeletstatsreceiver/metadata.yaml +++ b/receiver/kubeletstatsreceiver/metadata.yaml @@ -84,7 +84,7 @@ metrics: k8s.node.cpu.usage: enabled: false description: "Node CPU usage" - unit: 1 + unit: nanoseconds gauge: value_type: double attributes: [] @@ -199,7 +199,7 @@ metrics: k8s.pod.cpu.usage: enabled: false description: "Pod CPU usage" - unit: 1 + unit: nanoseconds gauge: value_type: double attributes: [ ] @@ -342,7 +342,7 @@ metrics: container.cpu.usage: enabled: false description: "Container CPU usage" - unit: 1 + unit: nanoseconds gauge: value_type: double attributes: [ ] From 6c323c1673e2ae5182ab578017c08057f4310e84 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Tue, 24 Oct 2023 08:54:22 -0600 Subject: [PATCH 4/8] Update units --- receiver/kubeletstatsreceiver/documentation.md | 6 +++--- .../internal/metadata/generated_metrics.go | 18 +++++++++--------- .../metadata/generated_metrics_test.go | 18 +++++++++--------- receiver/kubeletstatsreceiver/metadata.yaml | 12 ++++++------ 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md index e8513301469f..5f37fd740a27 100644 --- a/receiver/kubeletstatsreceiver/documentation.md +++ b/receiver/kubeletstatsreceiver/documentation.md @@ -392,7 +392,7 @@ Container CPU usage | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| nanoseconds | Gauge | Double | +| s | Gauge | Double | ### container.uptime @@ -440,7 +440,7 @@ Node CPU usage | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| nanoseconds | Gauge | Double | +| s | Gauge | Double | ### k8s.node.uptime @@ -456,7 +456,7 @@ Pod CPU usage | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| nanoseconds | Gauge | Double | +| s | Gauge | Double | ### k8s.pod.cpu_limit_utilization diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go index e57a04769569..395568cf78c3 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go @@ -98,7 +98,7 @@ type metricContainerCPUUsage struct { func (m 
*metricContainerCPUUsage) init() { m.data.SetName("container.cpu.usage") m.data.SetDescription("Container CPU usage") - m.data.SetUnit("nanoseconds") + m.data.SetUnit("s") m.data.SetEmptyGauge() } @@ -935,7 +935,7 @@ type metricK8sNodeCPUUsage struct { func (m *metricK8sNodeCPUUsage) init() { m.data.SetName("k8s.node.cpu.usage") m.data.SetDescription("Node CPU usage") - m.data.SetUnit("nanoseconds") + m.data.SetUnit("s") m.data.SetEmptyGauge() } @@ -1684,7 +1684,7 @@ type metricK8sPodCPUUsage struct { func (m *metricK8sPodCPUUsage) init() { m.data.SetName("k8s.pod.cpu.usage") m.data.SetDescription("Pod CPU usage") - m.data.SetUnit("nanoseconds") + m.data.SetUnit("s") m.data.SetEmptyGauge() } @@ -2890,14 +2890,14 @@ func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { } func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.CreateSettings, options ...metricBuilderOption) *MetricsBuilder { - if !mbc.Metrics.ContainerCPUUtilization.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `container.cpu.utilization`: WARNING: This metric will be disabled in a future release. Use metric container.cpu.usage instead.") + if mbc.Metrics.ContainerCPUUtilization.Enabled { + settings.Logger.Warn("[WARNING] `container.cpu.utilization` should not be enabled: WARNING: This metric will be disabled in a future release. Use metric container.cpu.usage instead.") } - if !mbc.Metrics.K8sNodeCPUUtilization.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `k8s.node.cpu.utilization`: WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead.") + if mbc.Metrics.K8sNodeCPUUtilization.Enabled { + settings.Logger.Warn("[WARNING] `k8s.node.cpu.utilization` should not be enabled: WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead.") } - if !mbc.Metrics.K8sPodCPUUtilization.enabledSetByUser { - settings.Logger.Warn("[WARNING] Please set `enabled` field explicitly for `k8s.pod.cpu.utilization`: This metric will be disabled in a future release. Use metric k8s.pod.cpu.usage instead.") + if mbc.Metrics.K8sPodCPUUtilization.Enabled { + settings.Logger.Warn("[WARNING] `k8s.pod.cpu.utilization` should not be enabled: This metric will be disabled in a future release. Use metric k8s.pod.cpu.usage instead.") } mb := &MetricsBuilder{ config: mbc, diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go index 0b554eb54f55..fd5f39b49181 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go @@ -49,16 +49,16 @@ func TestMetricsBuilder(t *testing.T) { mb := NewMetricsBuilder(loadMetricsBuilderConfig(t, test.name), settings, WithStartTime(start)) expectedWarnings := 0 - if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `container.cpu.utilization`: WARNING: This metric will be disabled in a future release. Use metric container.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message) + if test.configSet == testSetDefault || test.configSet == testSetAll { + assert.Equal(t, "[WARNING] `container.cpu.utilization` should not be enabled: WARNING: This metric will be disabled in a future release. 
Use metric container.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } - if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `k8s.node.cpu.utilization`: WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message) + if test.configSet == testSetDefault || test.configSet == testSetAll { + assert.Equal(t, "[WARNING] `k8s.node.cpu.utilization` should not be enabled: WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } - if test.configSet == testSetDefault { - assert.Equal(t, "[WARNING] Please set `enabled` field explicitly for `k8s.pod.cpu.utilization`: This metric will be disabled in a future release. Use metric k8s.pod.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message) + if test.configSet == testSetDefault || test.configSet == testSetAll { + assert.Equal(t, "[WARNING] `k8s.pod.cpu.utilization` should not be enabled: This metric will be disabled in a future release. Use metric k8s.pod.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message) expectedWarnings++ } @@ -335,7 +335,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Container CPU usage", ms.At(i).Description()) - assert.Equal(t, "nanoseconds", ms.At(i).Unit()) + assert.Equal(t, "s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -543,7 +543,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Node CPU usage", ms.At(i).Description()) - assert.Equal(t, "nanoseconds", ms.At(i).Unit()) + assert.Equal(t, "s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) @@ -743,7 +743,7 @@ func TestMetricsBuilder(t *testing.T) { assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type()) assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len()) assert.Equal(t, "Pod CPU usage", ms.At(i).Description()) - assert.Equal(t, "nanoseconds", ms.At(i).Unit()) + assert.Equal(t, "s", ms.At(i).Unit()) dp := ms.At(i).Gauge().DataPoints().At(0) assert.Equal(t, start, dp.StartTimestamp()) assert.Equal(t, ts, dp.Timestamp()) diff --git a/receiver/kubeletstatsreceiver/metadata.yaml b/receiver/kubeletstatsreceiver/metadata.yaml index 88f3c329078d..7ac7e7af99e2 100644 --- a/receiver/kubeletstatsreceiver/metadata.yaml +++ b/receiver/kubeletstatsreceiver/metadata.yaml @@ -84,7 +84,7 @@ metrics: k8s.node.cpu.usage: enabled: false description: "Node CPU usage" - unit: nanoseconds + unit: s gauge: value_type: double attributes: [] @@ -92,7 +92,7 @@ metrics: enabled: true description: "Node CPU utilization" warnings: - if_enabled_not_set: "WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead." + if_enabled: "WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead." 
unit: 1 gauge: value_type: double @@ -199,7 +199,7 @@ metrics: k8s.pod.cpu.usage: enabled: false description: "Pod CPU usage" - unit: nanoseconds + unit: s gauge: value_type: double attributes: [ ] @@ -207,7 +207,7 @@ metrics: enabled: true description: "Pod CPU utilization" warnings: - if_enabled_not_set: "This metric will be disabled in a future release. Use metric k8s.pod.cpu.usage instead." + if_enabled: "This metric will be disabled in a future release. Use metric k8s.pod.cpu.usage instead." unit: 1 gauge: value_type: double @@ -342,7 +342,7 @@ metrics: container.cpu.usage: enabled: false description: "Container CPU usage" - unit: nanoseconds + unit: s gauge: value_type: double attributes: [ ] @@ -350,7 +350,7 @@ metrics: enabled: true description: "Container CPU utilization" warnings: - if_enabled_not_set: "WARNING: This metric will be disabled in a future release. Use metric container.cpu.usage instead." + if_enabled: "WARNING: This metric will be disabled in a future release. Use metric container.cpu.usage instead." unit: 1 gauge: value_type: double From c018ee0ec3ed7b9f41f18b52192c33a973cba887 Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Wed, 10 Jan 2024 08:37:51 -0700 Subject: [PATCH 5/8] Update descriptions --- receiver/kubeletstatsreceiver/metadata.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/receiver/kubeletstatsreceiver/metadata.yaml b/receiver/kubeletstatsreceiver/metadata.yaml index 7ac7e7af99e2..530c6318bce2 100644 --- a/receiver/kubeletstatsreceiver/metadata.yaml +++ b/receiver/kubeletstatsreceiver/metadata.yaml @@ -83,7 +83,7 @@ attributes: metrics: k8s.node.cpu.usage: enabled: false - description: "Node CPU usage" + description: "Total CPU usage (sum of all cores per second) averaged over the sample window" unit: s gauge: value_type: double @@ -99,7 +99,7 @@ metrics: attributes: [] k8s.node.cpu.time: enabled: true - description: "Node CPU time" + description: "Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation" unit: s sum: value_type: double @@ -198,7 +198,7 @@ metrics: attributes: [] k8s.pod.cpu.usage: enabled: false - description: "Pod CPU usage" + description: "Total CPU usage (sum of all cores per second) averaged over the sample window" unit: s gauge: value_type: double @@ -214,7 +214,7 @@ metrics: attributes: [ ] k8s.pod.cpu.time: enabled: true - description: "Pod CPU time" + description: "Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation" unit: s sum: value_type: double @@ -341,7 +341,7 @@ metrics: attributes: [] container.cpu.usage: enabled: false - description: "Container CPU usage" + description: "Total CPU usage (sum of all cores per second) averaged over the sample window" unit: s gauge: value_type: double @@ -357,7 +357,7 @@ metrics: attributes: [ ] container.cpu.time: enabled: true - description: "Container CPU time" + description: "Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation" unit: s sum: value_type: double From 10f0f503b378678c0fab6c9b0f4d9dbf59c429fc Mon Sep 17 00:00:00 2001 From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com> Date: Thu, 11 Jan 2024 09:15:01 -0700 Subject: [PATCH 6/8] Update unit --- receiver/kubeletstatsreceiver/documentation.md | 18 +++++++++--------- .../internal/metadata/generated_metrics.go | 18 +++++++++--------- .../metadata/generated_metrics_test.go | 18 +++++++++--------- 
receiver/kubeletstatsreceiver/metadata.yaml | 6 +++--- 4 files changed, 30 insertions(+), 30 deletions(-) diff --git a/receiver/kubeletstatsreceiver/documentation.md b/receiver/kubeletstatsreceiver/documentation.md index 5f37fd740a27..7f9687acc30b 100644 --- a/receiver/kubeletstatsreceiver/documentation.md +++ b/receiver/kubeletstatsreceiver/documentation.md @@ -14,7 +14,7 @@ metrics: ### container.cpu.time -Container CPU time +Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | @@ -102,7 +102,7 @@ Container memory working_set ### k8s.node.cpu.time -Node CPU time +Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | @@ -220,7 +220,7 @@ Node network IO ### k8s.pod.cpu.time -Pod CPU time +Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation | Unit | Metric Type | Value Type | Aggregation Temporality | Monotonic | | ---- | ----------- | ---------- | ----------------------- | --------- | @@ -388,11 +388,11 @@ metrics: ### container.cpu.usage -Container CPU usage +Total CPU usage (sum of all cores per second) averaged over the sample window | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| s | Gauge | Double | +| {cpu} | Gauge | Double | ### container.uptime @@ -436,11 +436,11 @@ Container memory utilization as a ratio of the container's requests ### k8s.node.cpu.usage -Node CPU usage +Total CPU usage (sum of all cores per second) averaged over the sample window | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| s | Gauge | Double | +| {cpu} | Gauge | Double | ### k8s.node.uptime @@ -452,11 +452,11 @@ The time since the node started ### k8s.pod.cpu.usage -Pod CPU usage +Total CPU usage (sum of all cores per second) averaged over the sample window | Unit | Metric Type | Value Type | | ---- | ----------- | ---------- | -| s | Gauge | Double | +| {cpu} | Gauge | Double | ### k8s.pod.cpu_limit_utilization diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go index 395568cf78c3..3f312391aba5 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go @@ -46,7 +46,7 @@ type metricContainerCPUTime struct { // init fills container.cpu.time metric with initial data. func (m *metricContainerCPUTime) init() { m.data.SetName("container.cpu.time") - m.data.SetDescription("Container CPU time") + m.data.SetDescription("Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation") m.data.SetUnit("s") m.data.SetEmptySum() m.data.Sum().SetIsMonotonic(true) @@ -97,8 +97,8 @@ type metricContainerCPUUsage struct { // init fills container.cpu.usage metric with initial data. 
 func (m *metricContainerCPUUsage) init() {
 	m.data.SetName("container.cpu.usage")
-	m.data.SetDescription("Container CPU usage")
-	m.data.SetUnit("s")
+	m.data.SetDescription("Total CPU usage (sum of all cores per second) averaged over the sample window")
+	m.data.SetUnit("{cpu}")
 	m.data.SetEmptyGauge()
 }
@@ -883,7 +883,7 @@ type metricK8sNodeCPUTime struct {
 // init fills k8s.node.cpu.time metric with initial data.
 func (m *metricK8sNodeCPUTime) init() {
 	m.data.SetName("k8s.node.cpu.time")
-	m.data.SetDescription("Node CPU time")
+	m.data.SetDescription("Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation")
 	m.data.SetUnit("s")
 	m.data.SetEmptySum()
 	m.data.Sum().SetIsMonotonic(true)
@@ -934,8 +934,8 @@ type metricK8sNodeCPUUsage struct {
 // init fills k8s.node.cpu.usage metric with initial data.
 func (m *metricK8sNodeCPUUsage) init() {
 	m.data.SetName("k8s.node.cpu.usage")
-	m.data.SetDescription("Node CPU usage")
-	m.data.SetUnit("s")
+	m.data.SetDescription("Total CPU usage (sum of all cores per second) averaged over the sample window")
+	m.data.SetUnit("{cpu}")
 	m.data.SetEmptyGauge()
 }
@@ -1632,7 +1632,7 @@ type metricK8sPodCPUTime struct {
 // init fills k8s.pod.cpu.time metric with initial data.
 func (m *metricK8sPodCPUTime) init() {
 	m.data.SetName("k8s.pod.cpu.time")
-	m.data.SetDescription("Pod CPU time")
+	m.data.SetDescription("Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation")
 	m.data.SetUnit("s")
 	m.data.SetEmptySum()
 	m.data.Sum().SetIsMonotonic(true)
@@ -1683,8 +1683,8 @@ type metricK8sPodCPUUsage struct {
 // init fills k8s.pod.cpu.usage metric with initial data.
 func (m *metricK8sPodCPUUsage) init() {
 	m.data.SetName("k8s.pod.cpu.usage")
-	m.data.SetDescription("Pod CPU usage")
-	m.data.SetUnit("s")
+	m.data.SetDescription("Total CPU usage (sum of all cores per second) averaged over the sample window")
+	m.data.SetUnit("{cpu}")
 	m.data.SetEmptyGauge()
 }
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go
index fd5f39b49181..ad03beaee512 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go
@@ -320,7 +320,7 @@ func TestMetricsBuilder(t *testing.T) {
 			validatedMetrics["container.cpu.time"] = true
 			assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
 			assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
-			assert.Equal(t, "Container CPU time", ms.At(i).Description())
+			assert.Equal(t, "Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation", ms.At(i).Description())
 			assert.Equal(t, "s", ms.At(i).Unit())
 			assert.Equal(t, true, ms.At(i).Sum().IsMonotonic())
 			assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
@@ -334,8 +334,8 @@ func TestMetricsBuilder(t *testing.T) {
 			validatedMetrics["container.cpu.usage"] = true
 			assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
 			assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
-			assert.Equal(t, "Container CPU usage", ms.At(i).Description())
-			assert.Equal(t, "s", ms.At(i).Unit())
+			assert.Equal(t, "Total CPU usage (sum of all cores per second) averaged over the sample window", ms.At(i).Description())
+			assert.Equal(t, "{cpu}", ms.At(i).Unit())
 			dp := ms.At(i).Gauge().DataPoints().At(0)
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
@@ -528,7 +528,7 @@ func TestMetricsBuilder(t *testing.T) {
 			validatedMetrics["k8s.node.cpu.time"] = true
 			assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
 			assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
-			assert.Equal(t, "Node CPU time", ms.At(i).Description())
+			assert.Equal(t, "Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation", ms.At(i).Description())
 			assert.Equal(t, "s", ms.At(i).Unit())
 			assert.Equal(t, true, ms.At(i).Sum().IsMonotonic())
 			assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
@@ -542,8 +542,8 @@ func TestMetricsBuilder(t *testing.T) {
 			validatedMetrics["k8s.node.cpu.usage"] = true
 			assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
 			assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
-			assert.Equal(t, "Node CPU usage", ms.At(i).Description())
-			assert.Equal(t, "s", ms.At(i).Unit())
+			assert.Equal(t, "Total CPU usage (sum of all cores per second) averaged over the sample window", ms.At(i).Description())
+			assert.Equal(t, "{cpu}", ms.At(i).Unit())
 			dp := ms.At(i).Gauge().DataPoints().At(0)
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
@@ -728,7 +728,7 @@ func TestMetricsBuilder(t *testing.T) {
 			validatedMetrics["k8s.pod.cpu.time"] = true
 			assert.Equal(t, pmetric.MetricTypeSum, ms.At(i).Type())
 			assert.Equal(t, 1, ms.At(i).Sum().DataPoints().Len())
-			assert.Equal(t, "Pod CPU time", ms.At(i).Description())
+			assert.Equal(t, "Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation", ms.At(i).Description())
 			assert.Equal(t, "s", ms.At(i).Unit())
 			assert.Equal(t, true, ms.At(i).Sum().IsMonotonic())
 			assert.Equal(t, pmetric.AggregationTemporalityCumulative, ms.At(i).Sum().AggregationTemporality())
@@ -742,8 +742,8 @@ func TestMetricsBuilder(t *testing.T) {
 			validatedMetrics["k8s.pod.cpu.usage"] = true
 			assert.Equal(t, pmetric.MetricTypeGauge, ms.At(i).Type())
 			assert.Equal(t, 1, ms.At(i).Gauge().DataPoints().Len())
-			assert.Equal(t, "Pod CPU usage", ms.At(i).Description())
-			assert.Equal(t, "s", ms.At(i).Unit())
+			assert.Equal(t, "Total CPU usage (sum of all cores per second) averaged over the sample window", ms.At(i).Description())
+			assert.Equal(t, "{cpu}", ms.At(i).Unit())
 			dp := ms.At(i).Gauge().DataPoints().At(0)
 			assert.Equal(t, start, dp.StartTimestamp())
 			assert.Equal(t, ts, dp.Timestamp())
diff --git a/receiver/kubeletstatsreceiver/metadata.yaml b/receiver/kubeletstatsreceiver/metadata.yaml
index 00948e4c8b08..1fb375175825 100644
--- a/receiver/kubeletstatsreceiver/metadata.yaml
+++ b/receiver/kubeletstatsreceiver/metadata.yaml
@@ -84,7 +84,7 @@ metrics:
   k8s.node.cpu.usage:
     enabled: false
     description: "Total CPU usage (sum of all cores per second) averaged over the sample window"
-    unit: s
+    unit: "{cpu}"
     gauge:
       value_type: double
     attributes: []
@@ -199,7 +199,7 @@ metrics:
   k8s.pod.cpu.usage:
     enabled: false
    description: "Total CPU usage (sum of all cores per second) averaged over the sample window"
-    unit: s
+    unit: "{cpu}"
     gauge:
       value_type: double
     attributes: [ ]
@@ -342,7 +342,7 @@ metrics:
   container.cpu.usage:
     enabled: false
     description: "Total CPU usage (sum of all cores per second) averaged over the sample window"
-    unit: s
+    unit: "{cpu}"
     gauge:
       value_type: double
     attributes: [ ]

From d7ad18102f7acfe961df6d9d2a888d76b720affd Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Thu, 11 Jan 2024 10:12:31 -0700
Subject: [PATCH 7/8] Update test expectation

---
 receiver/kubeletstatsreceiver/testdata/e2e/expected.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/receiver/kubeletstatsreceiver/testdata/e2e/expected.yaml b/receiver/kubeletstatsreceiver/testdata/e2e/expected.yaml
index f717b48e251d..1bc2dab1bd01 100644
--- a/receiver/kubeletstatsreceiver/testdata/e2e/expected.yaml
+++ b/receiver/kubeletstatsreceiver/testdata/e2e/expected.yaml
@@ -10,7 +10,7 @@ resourceMetrics:
             version: latest
       metrics:
         - name: k8s.node.cpu.time
-          description: Node CPU time
+          description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation
           unit: s
           sum:
             dataPoints:

From f9dd0351823087ee56df264847579a93373ca3a8 Mon Sep 17 00:00:00 2001
From: Tyler Helmuth <12352919+TylerHelmuth@users.noreply.github.com>
Date: Thu, 11 Jan 2024 10:41:01 -0700
Subject: [PATCH 8/8] Update descriptions

---
 .../scraper/test_scraper_expected.yaml        | 38 +++++++++----------
 ..._metadata_Container_Metadata_expected.yaml | 18 ++++-----
 ...ith_metric_groups_all_groups_expected.yaml | 38 +++++++++----------
 ..._groups_only_container_group_expected.yaml | 18 ++++-----
 ...etric_groups_only_node_group_expected.yaml |  2 +-
 ...metric_groups_only_pod_group_expected.yaml | 18 ++++-----
 ...c_groups_pod_and_node_groups_expected.yaml | 20 +++++-----
 7 files changed, 76 insertions(+), 76 deletions(-)

diff --git a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_expected.yaml b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_expected.yaml
index 7f9704a8d22e..0188167b1360 100644
--- a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_expected.yaml
+++ b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_expected.yaml
@@ -6,7 +6,7 @@ resourceMetrics:
           stringValue: minikube
       scopeMetrics:
         - metrics:
-          - description: Node CPU time
+          - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation
             name: k8s.node.cpu.time
             sum:
               aggregationTemporality: 2
@@ -166,7 +166,7 @@ resourceMetrics:
           stringValue: 42ad382b-ed0b-446d-9aab-3fdce8b4f9e2
       scopeMetrics:
        - metrics:
-          - description: Pod CPU time
+          - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation
            name: k8s.pod.cpu.time
            sum:
              aggregationTemporality: 2
@@ -326,7 +326,7 @@ resourceMetrics:
           stringValue: eb632b33-62c6-4a80-9575-a97ab363ad7f
       scopeMetrics:
        - metrics:
-          - description: Pod CPU time
+          - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation
            name: k8s.pod.cpu.time
            sum:
              aggregationTemporality: 2
@@ -486,7 +486,7 @@ resourceMetrics:
           stringValue: 0adffe8e-9849-4e05-b4cd-92d2d1e1f1c3
       scopeMetrics:
        - metrics:
-          - description: Pod CPU time
+          - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation
            name: k8s.pod.cpu.time
            sum:
              aggregationTemporality: 2
@@ -646,7 +646,7 @@ resourceMetrics:
           stringValue: 5a5fbd34cfb43ee7bee976798370c910
       scopeMetrics:
        - metrics:
-          - description: Pod CPU time
+          - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation
            name: k8s.pod.cpu.time
            sum:
              aggregationTemporality: 2
@@ -806,7 +806,7 @@ resourceMetrics:
           stringValue: 3bef16d65fa74d46458df57d8f6f59af
       scopeMetrics:
        - metrics:
-          - description: Pod CPU time
+          - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation
            name: k8s.pod.cpu.time
            sum:
aggregationTemporality: 2 @@ -966,7 +966,7 @@ resourceMetrics: stringValue: 3016593d20758bbfe68aba26604a8e3d scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1126,7 +1126,7 @@ resourceMetrics: stringValue: 0a6d6b05-0e8d-4920-8a38-926a33164d45 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1286,7 +1286,7 @@ resourceMetrics: stringValue: 5795d0c442cb997ff93c49feeb9f6386 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1446,7 +1446,7 @@ resourceMetrics: stringValue: 14bf95e0-9451-4192-b111-807b03163670 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1609,7 +1609,7 @@ resourceMetrics: stringValue: eb632b33-62c6-4a80-9575-a97ab363ad7f scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -1718,7 +1718,7 @@ resourceMetrics: stringValue: 0adffe8e-9849-4e05-b4cd-92d2d1e1f1c3 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -1827,7 +1827,7 @@ resourceMetrics: stringValue: 5a5fbd34cfb43ee7bee976798370c910 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -1936,7 +1936,7 @@ resourceMetrics: stringValue: 3bef16d65fa74d46458df57d8f6f59af scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2045,7 +2045,7 @@ resourceMetrics: stringValue: 3016593d20758bbfe68aba26604a8e3d scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2154,7 +2154,7 @@ resourceMetrics: stringValue: 0a6d6b05-0e8d-4920-8a38-926a33164d45 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2263,7 +2263,7 @@ resourceMetrics: stringValue: 5795d0c442cb997ff93c49feeb9f6386 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2372,7 +2372,7 @@ resourceMetrics: stringValue: 
42ad382b-ed0b-446d-9aab-3fdce8b4f9e2 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2481,7 +2481,7 @@ resourceMetrics: stringValue: 14bf95e0-9451-4192-b111-807b03163670 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 diff --git a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metadata_Container_Metadata_expected.yaml b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metadata_Container_Metadata_expected.yaml index dabd51f64e25..f2ed413292e7 100644 --- a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metadata_Container_Metadata_expected.yaml +++ b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metadata_Container_Metadata_expected.yaml @@ -18,7 +18,7 @@ resourceMetrics: stringValue: 5795d0c442cb997ff93c49feeb9f6386 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -130,7 +130,7 @@ resourceMetrics: stringValue: 0a6d6b05-0e8d-4920-8a38-926a33164d45 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -242,7 +242,7 @@ resourceMetrics: stringValue: eb632b33-62c6-4a80-9575-a97ab363ad7f scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -354,7 +354,7 @@ resourceMetrics: stringValue: 3bef16d65fa74d46458df57d8f6f59af scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -466,7 +466,7 @@ resourceMetrics: stringValue: 5a5fbd34cfb43ee7bee976798370c910 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -578,7 +578,7 @@ resourceMetrics: stringValue: 14bf95e0-9451-4192-b111-807b03163670 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -690,7 +690,7 @@ resourceMetrics: stringValue: 0adffe8e-9849-4e05-b4cd-92d2d1e1f1c3 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -802,7 +802,7 @@ resourceMetrics: stringValue: 3016593d20758bbfe68aba26604a8e3d scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: 
container.cpu.time sum: aggregationTemporality: 2 @@ -914,7 +914,7 @@ resourceMetrics: stringValue: 42ad382b-ed0b-446d-9aab-3fdce8b4f9e2 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 diff --git a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_all_groups_expected.yaml b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_all_groups_expected.yaml index da410967cd78..89960272580b 100644 --- a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_all_groups_expected.yaml +++ b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_all_groups_expected.yaml @@ -6,7 +6,7 @@ resourceMetrics: stringValue: minikube scopeMetrics: - metrics: - - description: Node CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.node.cpu.time sum: aggregationTemporality: 2 @@ -166,7 +166,7 @@ resourceMetrics: stringValue: 42ad382b-ed0b-446d-9aab-3fdce8b4f9e2 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -326,7 +326,7 @@ resourceMetrics: stringValue: eb632b33-62c6-4a80-9575-a97ab363ad7f scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -486,7 +486,7 @@ resourceMetrics: stringValue: 0adffe8e-9849-4e05-b4cd-92d2d1e1f1c3 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -646,7 +646,7 @@ resourceMetrics: stringValue: 5a5fbd34cfb43ee7bee976798370c910 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -806,7 +806,7 @@ resourceMetrics: stringValue: 3bef16d65fa74d46458df57d8f6f59af scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -966,7 +966,7 @@ resourceMetrics: stringValue: 3016593d20758bbfe68aba26604a8e3d scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1126,7 +1126,7 @@ resourceMetrics: stringValue: 0a6d6b05-0e8d-4920-8a38-926a33164d45 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1286,7 +1286,7 @@ resourceMetrics: stringValue: 5795d0c442cb997ff93c49feeb9f6386 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: 
k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1446,7 +1446,7 @@ resourceMetrics: stringValue: 14bf95e0-9451-4192-b111-807b03163670 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -2084,7 +2084,7 @@ resourceMetrics: stringValue: 5795d0c442cb997ff93c49feeb9f6386 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2196,7 +2196,7 @@ resourceMetrics: stringValue: 0a6d6b05-0e8d-4920-8a38-926a33164d45 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2308,7 +2308,7 @@ resourceMetrics: stringValue: eb632b33-62c6-4a80-9575-a97ab363ad7f scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2420,7 +2420,7 @@ resourceMetrics: stringValue: 3bef16d65fa74d46458df57d8f6f59af scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2532,7 +2532,7 @@ resourceMetrics: stringValue: 5a5fbd34cfb43ee7bee976798370c910 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2644,7 +2644,7 @@ resourceMetrics: stringValue: 14bf95e0-9451-4192-b111-807b03163670 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2756,7 +2756,7 @@ resourceMetrics: stringValue: 0adffe8e-9849-4e05-b4cd-92d2d1e1f1c3 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2868,7 +2868,7 @@ resourceMetrics: stringValue: 3016593d20758bbfe68aba26604a8e3d scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -2980,7 +2980,7 @@ resourceMetrics: stringValue: 42ad382b-ed0b-446d-9aab-3fdce8b4f9e2 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 diff --git a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_container_group_expected.yaml b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_container_group_expected.yaml index dabd51f64e25..f2ed413292e7 100644 --- 
a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_container_group_expected.yaml +++ b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_container_group_expected.yaml @@ -18,7 +18,7 @@ resourceMetrics: stringValue: 5795d0c442cb997ff93c49feeb9f6386 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -130,7 +130,7 @@ resourceMetrics: stringValue: 0a6d6b05-0e8d-4920-8a38-926a33164d45 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -242,7 +242,7 @@ resourceMetrics: stringValue: eb632b33-62c6-4a80-9575-a97ab363ad7f scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -354,7 +354,7 @@ resourceMetrics: stringValue: 3bef16d65fa74d46458df57d8f6f59af scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -466,7 +466,7 @@ resourceMetrics: stringValue: 5a5fbd34cfb43ee7bee976798370c910 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -578,7 +578,7 @@ resourceMetrics: stringValue: 14bf95e0-9451-4192-b111-807b03163670 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -690,7 +690,7 @@ resourceMetrics: stringValue: 0adffe8e-9849-4e05-b4cd-92d2d1e1f1c3 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -802,7 +802,7 @@ resourceMetrics: stringValue: 3016593d20758bbfe68aba26604a8e3d scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 @@ -914,7 +914,7 @@ resourceMetrics: stringValue: 42ad382b-ed0b-446d-9aab-3fdce8b4f9e2 scopeMetrics: - metrics: - - description: Container CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: container.cpu.time sum: aggregationTemporality: 2 diff --git a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_node_group_expected.yaml b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_node_group_expected.yaml index 3b72325e4d27..e263f6a7404b 100644 --- a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_node_group_expected.yaml +++ 
b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_node_group_expected.yaml @@ -6,7 +6,7 @@ resourceMetrics: stringValue: minikube scopeMetrics: - metrics: - - description: Node CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.node.cpu.time sum: aggregationTemporality: 2 diff --git a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_pod_group_expected.yaml b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_pod_group_expected.yaml index 3c61724a19b1..4886e24e7123 100644 --- a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_pod_group_expected.yaml +++ b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_only_pod_group_expected.yaml @@ -12,7 +12,7 @@ resourceMetrics: stringValue: 42ad382b-ed0b-446d-9aab-3fdce8b4f9e2 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -172,7 +172,7 @@ resourceMetrics: stringValue: eb632b33-62c6-4a80-9575-a97ab363ad7f scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -332,7 +332,7 @@ resourceMetrics: stringValue: 0adffe8e-9849-4e05-b4cd-92d2d1e1f1c3 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -492,7 +492,7 @@ resourceMetrics: stringValue: 5a5fbd34cfb43ee7bee976798370c910 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -652,7 +652,7 @@ resourceMetrics: stringValue: 3bef16d65fa74d46458df57d8f6f59af scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -812,7 +812,7 @@ resourceMetrics: stringValue: 3016593d20758bbfe68aba26604a8e3d scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -972,7 +972,7 @@ resourceMetrics: stringValue: 0a6d6b05-0e8d-4920-8a38-926a33164d45 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1132,7 +1132,7 @@ resourceMetrics: stringValue: 5795d0c442cb997ff93c49feeb9f6386 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1292,7 +1292,7 @@ resourceMetrics: stringValue: 14bf95e0-9451-4192-b111-807b03163670 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) 
spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 diff --git a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_pod_and_node_groups_expected.yaml b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_pod_and_node_groups_expected.yaml index a7936daac1ed..56422e8b848d 100644 --- a/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_pod_and_node_groups_expected.yaml +++ b/receiver/kubeletstatsreceiver/testdata/scraper/test_scraper_with_metric_groups_pod_and_node_groups_expected.yaml @@ -6,7 +6,7 @@ resourceMetrics: stringValue: minikube scopeMetrics: - metrics: - - description: Node CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.node.cpu.time sum: aggregationTemporality: 2 @@ -166,7 +166,7 @@ resourceMetrics: stringValue: 42ad382b-ed0b-446d-9aab-3fdce8b4f9e2 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -326,7 +326,7 @@ resourceMetrics: stringValue: eb632b33-62c6-4a80-9575-a97ab363ad7f scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -486,7 +486,7 @@ resourceMetrics: stringValue: 0adffe8e-9849-4e05-b4cd-92d2d1e1f1c3 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -646,7 +646,7 @@ resourceMetrics: stringValue: 5a5fbd34cfb43ee7bee976798370c910 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -806,7 +806,7 @@ resourceMetrics: stringValue: 3bef16d65fa74d46458df57d8f6f59af scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -966,7 +966,7 @@ resourceMetrics: stringValue: 3016593d20758bbfe68aba26604a8e3d scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1126,7 +1126,7 @@ resourceMetrics: stringValue: 0a6d6b05-0e8d-4920-8a38-926a33164d45 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1286,7 +1286,7 @@ resourceMetrics: stringValue: 5795d0c442cb997ff93c49feeb9f6386 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2 @@ -1446,7 +1446,7 @@ resourceMetrics: stringValue: 14bf95e0-9451-4192-b111-807b03163670 scopeMetrics: - metrics: - - description: Pod CPU time + - description: Total cumulative CPU time (sum 
of all cores) spent by the container/pod/node since its creation name: k8s.pod.cpu.time sum: aggregationTemporality: 2
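
Note for reviewers trying this out: the new container.cpu.usage, k8s.node.cpu.usage, and k8s.pod.cpu.usage metrics are added with enabled: false, so nothing changes by default. A minimal collector configuration sketch that opts into the new names is shown below; it is illustrative only and not part of this patch, and the receivers/kubeletstats wrapper plus per-metric enabled flags are the receiver's standard config keys rather than anything introduced here.

    receivers:
      kubeletstats:
        metrics:
          # opt into the new usage metrics added by this change
          container.cpu.usage:
            enabled: true
          k8s.node.cpu.usage:
            enabled: true
          k8s.pod.cpu.usage:
            enabled: true

The existing *.cpu.utilization metrics stay enabled until their own deprecation step, so both names can be compared side by side during the transition.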