Use UInt64 for fields holding big positive integer values (#307)
* Use UInt64 for fields holding big positive integer values

* Update snaps
kuskoman committed Mar 4, 2024
1 parent d714378 commit 31f05cf
Showing 6 changed files with 70 additions and 70 deletions.
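Reviewer note, not part of the commit: the practical motivation is that encoding/json refuses to decode a JSON number larger than math.MaxInt64 into an int64 field, so sufficiently large counters or byte sizes reported by the Logstash API would fail to unmarshal, while uint64 accepts the full positive range. A minimal sketch of the failure mode (struct names are made up for illustration, not taken from the repository):

package main

import (
    "encoding/json"
    "fmt"
)

// Illustrative structs only; they mirror the shape of the exporter's
// response types but are not imported from the repo.
type signedStats struct {
    QueueSizeInBytes int64 `json:"queue_size_in_bytes"`
}

type unsignedStats struct {
    QueueSizeInBytes uint64 `json:"queue_size_in_bytes"`
}

func main() {
    // A value above math.MaxInt64 (9223372036854775807).
    payload := []byte(`{"queue_size_in_bytes": 18446744073709551615}`)

    var s signedStats
    fmt.Println(json.Unmarshal(payload, &s)) // json: cannot unmarshal number ... of type int64

    var u unsignedStats
    fmt.Println(json.Unmarshal(payload, &u), u.QueueSizeInBytes) // <nil> 18446744073709551615
}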
20 changes: 10 additions & 10 deletions internal/collectors/nodestats/nodestats_collector.go
@@ -242,14 +242,14 @@ func (collector *NodestatsCollector) collectSingleInstance(client logstash_clien

// ************ PROCESS ************
procStats := nodeStats.Process
- metricsHelper.NewInt64Metric(collector.ProcessOpenFileDescriptors, prometheus.GaugeValue, procStats.OpenFileDescriptors)
- metricsHelper.NewInt64Metric(collector.ProcessMaxFileDescriptors, prometheus.GaugeValue, procStats.MaxFileDescriptors)
- metricsHelper.NewInt64Metric(collector.ProcessCpuPercent, prometheus.GaugeValue, procStats.CPU.Percent)
- metricsHelper.NewInt64Metric(collector.ProcessCpuTotalMillis, prometheus.CounterValue, procStats.CPU.TotalInMillis)
+ metricsHelper.NewUInt64Metric(collector.ProcessOpenFileDescriptors, prometheus.GaugeValue, procStats.OpenFileDescriptors)
+ metricsHelper.NewUInt64Metric(collector.ProcessMaxFileDescriptors, prometheus.GaugeValue, procStats.MaxFileDescriptors)
+ metricsHelper.NewIntMetric(collector.ProcessCpuPercent, prometheus.GaugeValue, procStats.CPU.Percent)
+ metricsHelper.NewUInt64Metric(collector.ProcessCpuTotalMillis, prometheus.CounterValue, procStats.CPU.TotalInMillis)
metricsHelper.NewFloatMetric(collector.ProcessCpuLoadAverageOneM, prometheus.GaugeValue, procStats.CPU.LoadAverage.OneM)
metricsHelper.NewFloatMetric(collector.ProcessCpuLoadAverageFiveM, prometheus.GaugeValue, procStats.CPU.LoadAverage.FiveM)
metricsHelper.NewFloatMetric(collector.ProcessCpuLoadAverageFifteenM, prometheus.GaugeValue, procStats.CPU.LoadAverage.FifteenM)
- metricsHelper.NewInt64Metric(collector.ProcessMemTotalVirtual, prometheus.GaugeValue, procStats.Mem.TotalVirtualInBytes)
+ metricsHelper.NewUInt64Metric(collector.ProcessMemTotalVirtual, prometheus.GaugeValue, procStats.Mem.TotalVirtualInBytes)
// *********************************

// ************ RELOADS ************
@@ -263,11 +263,11 @@ func (collector *NodestatsCollector) collectSingleInstance(client logstash_clien

// ************ EVENTS ************
eventsStats := nodeStats.Events
- metricsHelper.NewInt64Metric(collector.EventsIn, prometheus.CounterValue, eventsStats.In)
- metricsHelper.NewInt64Metric(collector.EventsFiltered, prometheus.CounterValue, eventsStats.Filtered)
- metricsHelper.NewInt64Metric(collector.EventsOut, prometheus.CounterValue, eventsStats.Out)
- metricsHelper.NewInt64Metric(collector.EventsDurationInMillis, prometheus.GaugeValue, eventsStats.DurationInMillis)
- metricsHelper.NewInt64Metric(collector.EventsQueuePushDurationInMillis, prometheus.GaugeValue, eventsStats.QueuePushDurationInMillis)
+ metricsHelper.NewUInt64Metric(collector.EventsIn, prometheus.CounterValue, eventsStats.In)
+ metricsHelper.NewUInt64Metric(collector.EventsFiltered, prometheus.CounterValue, eventsStats.Filtered)
+ metricsHelper.NewUInt64Metric(collector.EventsOut, prometheus.CounterValue, eventsStats.Out)
+ metricsHelper.NewUInt64Metric(collector.EventsDurationInMillis, prometheus.GaugeValue, eventsStats.DurationInMillis)
+ metricsHelper.NewUInt64Metric(collector.EventsQueuePushDurationInMillis, prometheus.GaugeValue, eventsStats.QueuePushDurationInMillis)
// ********************************

// ************ FLOW ************
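A caveat worth keeping in mind (my note, not the commit's): Prometheus sample values are float64, so the switch to uint64 fixes JSON decoding but values above 2^53 still lose integer precision once emitted as metrics. For byte and millisecond counters that threshold (roughly 9 petabytes, or about 285,000 years of milliseconds) is rarely a concern in practice:

package main

import "fmt"

func main() {
    // float64 has a 53-bit mantissa, so integers above 2^53 round when converted.
    v := uint64(1<<53) + 1
    fmt.Println(v, uint64(float64(v))) // 9007199254740993 9007199254740992
}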
12 changes: 6 additions & 6 deletions internal/collectors/nodestats/pipeline_subcollector.go
@@ -142,9 +142,9 @@ func (subcollector *PipelineSubcollector) Collect(pipeStats *responses.SinglePip
// *******************

// ***** QUEUE *****
- metricsHelper.NewInt64Metric(subcollector.QueueEventsCount, prometheus.CounterValue, pipeStats.Queue.EventsCount)
- metricsHelper.NewInt64Metric(subcollector.QueueEventsQueueSize, prometheus.GaugeValue, pipeStats.Queue.QueueSizeInBytes)
- metricsHelper.NewInt64Metric(subcollector.QueueMaxQueueSizeInBytes, prometheus.GaugeValue, pipeStats.Queue.MaxQueueSizeInBytes)
+ metricsHelper.NewUInt64Metric(subcollector.QueueEventsCount, prometheus.CounterValue, pipeStats.Queue.EventsCount)
+ metricsHelper.NewUInt64Metric(subcollector.QueueEventsQueueSize, prometheus.GaugeValue, pipeStats.Queue.QueueSizeInBytes)
+ metricsHelper.NewUInt64Metric(subcollector.QueueMaxQueueSizeInBytes, prometheus.GaugeValue, pipeStats.Queue.MaxQueueSizeInBytes)
// *****************

// ***** FLOW *****
@@ -164,9 +164,9 @@ func (subcollector *PipelineSubcollector) Collect(pipeStats *responses.SinglePip
// ***** DEAD LETTER QUEUE *****
deadLetterQueueStats := pipeStats.DeadLetterQueue
metricsHelper.NewIntMetric(subcollector.DeadLetterQueueMaxSizeInBytes, prometheus.GaugeValue, deadLetterQueueStats.MaxQueueSizeInBytes)
- metricsHelper.NewInt64Metric(subcollector.DeadLetterQueueSizeInBytes, prometheus.GaugeValue, deadLetterQueueStats.QueueSizeInBytes)
- metricsHelper.NewInt64Metric(subcollector.DeadLetterQueueDroppedEvents, prometheus.CounterValue, deadLetterQueueStats.DroppedEvents)
- metricsHelper.NewInt64Metric(subcollector.DeadLetterQueueExpiredEvents, prometheus.CounterValue, deadLetterQueueStats.ExpiredEvents)
+ metricsHelper.NewUInt64Metric(subcollector.DeadLetterQueueSizeInBytes, prometheus.GaugeValue, deadLetterQueueStats.QueueSizeInBytes)
+ metricsHelper.NewUInt64Metric(subcollector.DeadLetterQueueDroppedEvents, prometheus.CounterValue, deadLetterQueueStats.DroppedEvents)
+ metricsHelper.NewUInt64Metric(subcollector.DeadLetterQueueExpiredEvents, prometheus.CounterValue, deadLetterQueueStats.ExpiredEvents)
// *****************************

// ===== PLUGINS =====
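Side note (mine, not the commit's): DeadLetterQueueMaxSizeInBytes keeps its plain int type and its NewIntMetric call. On 64-bit builds Go's int is 64 bits wide, so configured limits such as the 47244640256 bytes (44 GiB) that appear in the snapshot fixtures below still fit; only a 32-bit build would overflow on that constant. A quick check:

package main

import "fmt"

func main() {
    // 47244640256 bytes is the DLQ limit seen in the snapshot fixtures; it fits
    // in a 64-bit int and converts exactly to GiB.
    const maxDLQ = 47244640256
    fmt.Printf("%d bytes = %.0f GiB\n", maxDLQ, float64(maxDLQ)/(1<<30)) // 47244640256 bytes = 44 GiB
}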
@@ -35,17 +35,17 @@ responses.NodeStatsResponse{
UptimeInMillis: 53120,
},
Process: responses.ProcessResponse{
- OpenFileDescriptors: 98,
+ OpenFileDescriptors: 0x62,
PeakOpenFileDescriptors: 98,
- MaxFileDescriptors: 1048576,
- Mem: struct { TotalVirtualInBytes int64 "json:\"total_virtual_in_bytes\"" }{TotalVirtualInBytes:9305346048},
- CPU: struct { TotalInMillis int64 "json:\"total_in_millis\""; Percent int64 "json:\"percent\""; LoadAverage struct { OneM float64 "json:\"1m\""; FiveM float64 "json:\"5m\""; FifteenM float64 "json:\"15m\"" } "json:\"load_average\"" }{
- TotalInMillis: 135300,
+ MaxFileDescriptors: 0x100000,
+ Mem: struct { TotalVirtualInBytes uint64 "json:\"total_virtual_in_bytes\"" }{TotalVirtualInBytes:0x22aa45000},
+ CPU: struct { TotalInMillis uint64 "json:\"total_in_millis\""; Percent int "json:\"percent\""; LoadAverage struct { OneM float64 "json:\"1m\""; FiveM float64 "json:\"5m\""; FifteenM float64 "json:\"15m\"" } "json:\"load_average\"" }{
+ TotalInMillis: 0x21084,
Percent: 0,
LoadAverage: struct { OneM float64 "json:\"1m\""; FiveM float64 "json:\"5m\""; FifteenM float64 "json:\"15m\"" }{OneM:3.79, FiveM:1.29, FifteenM:0.46},
},
},
- Events: responses.EventsResponse{In:4001, Filtered:10, Out:2, DurationInMillis:5, QueuePushDurationInMillis:7},
+ Events: responses.EventsResponse{In:0xfa1, Filtered:0xa, Out:0x2, DurationInMillis:0x5, QueuePushDurationInMillis:0x7},
Flow: responses.FlowResponse{
InputThroughput: struct { Current float64 "json:\"current\""; Lifetime float64 "json:\"lifetime\"" }{Current:1, Lifetime:117.4},
FilterThroughput: struct { Current float64 "json:\"current\""; Lifetime float64 "json:\"lifetime\"" }{Current:2.1, Lifetime:3.2},
@@ -82,8 +82,8 @@ responses.NodeStatsResponse{
},
},
Reloads: responses.PipelineReloadResponse{},
- Queue: struct { Type string "json:\"type\""; EventsCount int64 "json:\"events_count\""; QueueSizeInBytes int64 "json:\"queue_size_in_bytes\""; MaxQueueSizeInBytes int64 "json:\"max_queue_size_in_bytes\"" }{},
- DeadLetterQueue: struct { MaxQueueSizeInBytes int "json:\"max_queue_size_in_bytes\""; QueueSizeInBytes int64 "json:\"queue_size_in_bytes\""; DroppedEvents int64 "json:\"dropped_events\""; ExpiredEvents int64 "json:\"expired_events\""; StoragePolicy string "json:\"storage_policy\"" }{},
+ Queue: struct { Type string "json:\"type\""; EventsCount uint64 "json:\"events_count\""; QueueSizeInBytes uint64 "json:\"queue_size_in_bytes\""; MaxQueueSizeInBytes uint64 "json:\"max_queue_size_in_bytes\"" }{},
+ DeadLetterQueue: struct { MaxQueueSizeInBytes int "json:\"max_queue_size_in_bytes\""; QueueSizeInBytes uint64 "json:\"queue_size_in_bytes\""; DroppedEvents uint64 "json:\"dropped_events\""; ExpiredEvents uint64 "json:\"expired_events\""; StoragePolicy string "json:\"storage_policy\"" }{},
Hash: "",
EphemeralID: "",
},
@@ -159,8 +159,8 @@ responses.NodeStatsResponse{
Backtrace: {"org/logstash/execution/AbstractPipelineExt.java:151:in `reload_pipeline'", "/usr/share/logstash/logstash-core/lib/logstash/java_pipeline.rb:181:in `block in reload_pipeline'", "/usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/stud-0.0.23/lib/stud/task.rb:24:in `block in initialize'"},
},
},
- Queue: struct { Type string "json:\"type\""; EventsCount int64 "json:\"events_count\""; QueueSizeInBytes int64 "json:\"queue_size_in_bytes\""; MaxQueueSizeInBytes int64 "json:\"max_queue_size_in_bytes\"" }{Type:"memory", EventsCount:0, QueueSizeInBytes:0, MaxQueueSizeInBytes:0},
- DeadLetterQueue: struct { MaxQueueSizeInBytes int "json:\"max_queue_size_in_bytes\""; QueueSizeInBytes int64 "json:\"queue_size_in_bytes\""; DroppedEvents int64 "json:\"dropped_events\""; ExpiredEvents int64 "json:\"expired_events\""; StoragePolicy string "json:\"storage_policy\"" }{MaxQueueSizeInBytes:47244640256, QueueSizeInBytes:1, DroppedEvents:0, ExpiredEvents:0, StoragePolicy:"drop_newer"},
+ Queue: struct { Type string "json:\"type\""; EventsCount uint64 "json:\"events_count\""; QueueSizeInBytes uint64 "json:\"queue_size_in_bytes\""; MaxQueueSizeInBytes uint64 "json:\"max_queue_size_in_bytes\"" }{Type:"memory", EventsCount:0x0, QueueSizeInBytes:0x0, MaxQueueSizeInBytes:0x0},
+ DeadLetterQueue: struct { MaxQueueSizeInBytes int "json:\"max_queue_size_in_bytes\""; QueueSizeInBytes uint64 "json:\"queue_size_in_bytes\""; DroppedEvents uint64 "json:\"dropped_events\""; ExpiredEvents uint64 "json:\"expired_events\""; StoragePolicy string "json:\"storage_policy\"" }{MaxQueueSizeInBytes:47244640256, QueueSizeInBytes:0x1, DroppedEvents:0x0, ExpiredEvents:0x0, StoragePolicy:"drop_newer"},
Hash: "a73729cc9c29203931db21553c5edba063820a7e40d16cb5053be75cc3811a17",
EphemeralID: "a5c63d09-1ba6-4d67-90a5-075f468a7ab0",
},
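Why the snapshot values switched from decimal to hexadecimal: Go's %#v (Go-syntax) formatting, which the snapshot tooling appears to use, prints unsigned integers with a 0x prefix while signed integers stay decimal. The numbers themselves are unchanged — 0x62 is 98, 0x100000 is 1048576, 0x22aa45000 is 9305346048:

package main

import "fmt"

func main() {
    // %#v renders signed integers in decimal but unsigned integers in hex,
    // which is why the snapshot notation changed once the fields became uint64.
    fmt.Printf("%#v %#v\n", int64(98), uint64(98))           // 98 0x62
    fmt.Printf("%#v %#v\n", int64(1048576), uint64(1048576)) // 1048576 0x100000
    fmt.Printf("%d\n", uint64(0x22aa45000))                  // 9305346048
}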
34 changes: 17 additions & 17 deletions internal/fetcher/responses/nodestats_response.go
@@ -60,15 +60,15 @@ type JvmResponse struct {
}

type ProcessResponse struct {
- OpenFileDescriptors int64 `json:"open_file_descriptors"`
- PeakOpenFileDescriptors int64 `json:"peak_open_file_descriptors"`
- MaxFileDescriptors int64 `json:"max_file_descriptors"`
+ OpenFileDescriptors uint64 `json:"open_file_descriptors"`
+ PeakOpenFileDescriptors int64 `json:"peak_open_file_descriptors"`
+ MaxFileDescriptors uint64 `json:"max_file_descriptors"`
Mem struct {
- TotalVirtualInBytes int64 `json:"total_virtual_in_bytes"`
+ TotalVirtualInBytes uint64 `json:"total_virtual_in_bytes"`
} `json:"mem"`
CPU struct {
- TotalInMillis int64 `json:"total_in_millis"`
- Percent int64 `json:"percent"`
+ TotalInMillis uint64 `json:"total_in_millis"`
+ Percent int `json:"percent"`
LoadAverage struct {
OneM float64 `json:"1m"`
FiveM float64 `json:"5m"`
@@ -78,11 +78,11 @@ type ProcessResponse struct {
}

type EventsResponse struct {
- In int64 `json:"in"`
- Filtered int64 `json:"filtered"`
- Out int64 `json:"out"`
- DurationInMillis int64 `json:"duration_in_millis"`
- QueuePushDurationInMillis int64 `json:"queue_push_duration_in_millis"`
+ In uint64 `json:"in"`
+ Filtered uint64 `json:"filtered"`
+ Out uint64 `json:"out"`
+ DurationInMillis uint64 `json:"duration_in_millis"`
+ QueuePushDurationInMillis uint64 `json:"queue_push_duration_in_millis"`
}

type FlowResponse struct {
@@ -170,16 +170,16 @@ type SinglePipelineResponse struct {
Reloads PipelineReloadResponse `json:"reloads"`
Queue struct {
Type string `json:"type"`
- EventsCount int64 `json:"events_count"`
- QueueSizeInBytes int64 `json:"queue_size_in_bytes"`
- MaxQueueSizeInBytes int64 `json:"max_queue_size_in_bytes"`
+ EventsCount uint64 `json:"events_count"`
+ QueueSizeInBytes uint64 `json:"queue_size_in_bytes"`
+ MaxQueueSizeInBytes uint64 `json:"max_queue_size_in_bytes"`
} `json:"queue"`
DeadLetterQueue struct {
MaxQueueSizeInBytes int `json:"max_queue_size_in_bytes"`
// todo: research how LastError is returned
- QueueSizeInBytes int64 `json:"queue_size_in_bytes"`
- DroppedEvents int64 `json:"dropped_events"`
- ExpiredEvents int64 `json:"expired_events"`
+ QueueSizeInBytes uint64 `json:"queue_size_in_bytes"`
+ DroppedEvents uint64 `json:"dropped_events"`
+ ExpiredEvents uint64 `json:"expired_events"`
StoragePolicy string `json:"storage_policy"`
} `json:"dead_letter_queue"`
Hash string `json:"hash"`
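One behavioral trade-off of the uint64 struct fields (my observation, assuming the standard encoding/json decoder): a negative number in the API response now fails to unmarshal instead of being stored as-is. That seems like the right call here, since none of these fields can legitimately be negative. A sketch with an illustrative struct:

package main

import (
    "encoding/json"
    "fmt"
)

// Mirrors the shape of EventsResponse for illustration; not imported from the repo.
type eventsResponse struct {
    In uint64 `json:"in"`
}

func main() {
    var e eventsResponse
    err := json.Unmarshal([]byte(`{"in": -1}`), &e)
    fmt.Println(err) // json: cannot unmarshal number -1 into Go struct field ... of type uint64
}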
16 changes: 8 additions & 8 deletions internal/prometheus_helper/prometheus_helper.go
@@ -57,7 +57,7 @@ func ExtractValueFromMetric(metric prometheus.Metric) (float64, error) {
// SimpleMetricsHelper is a helper struct that can be used to channel new prometheus.Metric objects
type SimpleMetricsHelper struct {
Channel chan<- prometheus.Metric
- Labels []string
+ Labels []string
}

// NewFloatMetric appends new metric with the desc and metricType, value
@@ -69,23 +69,23 @@ func (mh *SimpleMetricsHelper) NewFloatMetric(desc *prometheus.Desc, metricType

// NewIntMetric same as NewFloatMetric but for 'int' type
func (mh *SimpleMetricsHelper) NewIntMetric(desc *prometheus.Desc, metricType prometheus.ValueType, value int) {
- mh.NewFloatMetric(desc, metricType, float64(value))
+ mh.NewFloatMetric(desc, metricType, float64(value))
}

- // NewInt64Metric same as NewFloatMetric but for 'int64' type
- func (mh *SimpleMetricsHelper) NewInt64Metric(desc *prometheus.Desc, metricType prometheus.ValueType, value int64) {
- mh.NewFloatMetric(desc, metricType, float64(value))
+ // NewUInt64Metric same as NewFloatMetric but for 'uint64' type
+ func (mh *SimpleMetricsHelper) NewUInt64Metric(desc *prometheus.Desc, metricType prometheus.ValueType, value uint64) {
+ mh.NewFloatMetric(desc, metricType, float64(value))
}

- // newTimestampMetric same as NewFloatMetric but for setting Timestamp value
- func (mh *SimpleMetricsHelper) NewTimestampMetric (desc *prometheus.Desc, metricType prometheus.ValueType, value time.Time) {
+ // newTimestampMetric same as NewFloatMetric but for setting Timestamp value
+ func (mh *SimpleMetricsHelper) NewTimestampMetric(desc *prometheus.Desc, metricType prometheus.ValueType, value time.Time) {
metric := prometheus.NewMetricWithTimestamp(value, prometheus.MustNewConstMetric(desc, metricType, 1, mh.Labels...))
mh.Channel <- metric
}

// ExtractValueFromMetric extracts the timestamp from a prometheus.Metric object.
// Useful for testing NewTimestampMetric method of SimpleMetricsHelper
- // Returns the extracted timestamp in milliseconds of 'int64' type
+ // Returns the extracted timestamp in milliseconds of 'int64' type
func extractTimestampMsFromMetric(metric prometheus.Metric) (int64, error) {
var dtoMetric dto.Metric
err := metric.Write(&dtoMetric)
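A usage sketch of the new helper (the metric name, label value, and number below are illustrative, and I am assuming the package is named prometheus_helper as its directory suggests): NewUInt64Metric simply widens the value to float64 and delegates to NewFloatMetric, so collectors call it exactly like the old Int64 variant.

package prometheus_helper

import "github.com/prometheus/client_golang/prometheus"

// exampleCollect shows how a collector's Collect method might emit a uint64-backed gauge.
func exampleCollect(ch chan<- prometheus.Metric) {
    desc := prometheus.NewDesc("logstash_example_queue_bytes", "illustrative gauge", []string{"pipeline"}, nil)

    helper := &SimpleMetricsHelper{
        Channel: ch,
        Labels:  []string{"main"},
    }

    // Widened to float64 internally; exact for values up to 2^53.
    helper.NewUInt64Metric(desc, prometheus.GaugeValue, 9305346048)
}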
38 changes: 19 additions & 19 deletions internal/prometheus_helper/prometheus_helper_test.go
@@ -119,18 +119,18 @@ func TestSimpleMetricsHelper(t *testing.T) {
metricName := "test_metric"
metricDesc := prometheus.NewDesc(metricName, "test metric help", nil, nil)
metricValue := 42.0

ch := make(chan prometheus.Metric)

go func() {
helper := &SimpleMetricsHelper{
Channel: ch,
- Labels: []string{},
+ Labels: []string{},
}
helper.NewFloatMetric(metricDesc, prometheus.GaugeValue, metricValue)
}()

- metric := <- ch
+ metric := <-ch

fqName, err := ExtractFqName(metric.Desc().String())
if err != nil {
@@ -157,18 +157,18 @@

metricDesc := helper.NewDesc("metric", "help", "customLabel")
metricValue := 42.0

ch := make(chan prometheus.Metric)

go func() {
helper := &SimpleMetricsHelper{
Channel: ch,
- Labels: []string{"customLabelValue", "hostnameEndpoint"},
+ Labels: []string{"customLabelValue", "hostnameEndpoint"},
}
helper.NewFloatMetric(metricDesc, prometheus.GaugeValue, metricValue)
}()

- metric := <- ch
+ metric := <-ch

desc := metric.Desc()
if metricDesc.String() != desc.String() {
@@ -185,19 +185,19 @@
})

t.Run("should create metrics with different value types", func(t *testing.T) {
- metricName := "test_metric"
- metricDesc := prometheus.NewDesc(metricName, "test metric help", nil, nil)
+ metricName := "test_metric"
+ metricDesc := prometheus.NewDesc(metricName, "test metric help", nil, nil)
metricValue := 42.0

ch := make(chan prometheus.Metric, 3)

helper := &SimpleMetricsHelper{
Channel: ch,
- Labels: []string{},
+ Labels: []string{},
}
helper.NewFloatMetric(metricDesc, prometheus.GaugeValue, metricValue)
helper.NewIntMetric(metricDesc, prometheus.GaugeValue, int(metricValue))
- helper.NewInt64Metric(metricDesc, prometheus.GaugeValue, int64(metricValue))
+ helper.NewUInt64Metric(metricDesc, prometheus.GaugeValue, uint64(metricValue))

close(ch)

@@ -213,21 +213,21 @@
})

t.Run("should create timestamp metric", func(t *testing.T) {
- metricName := "test_metric"
- metricDesc := prometheus.NewDesc(metricName, "test metric help", nil, nil)
+ metricName := "test_metric"
+ metricDesc := prometheus.NewDesc(metricName, "test metric help", nil, nil)
metricValue := time.UnixMilli(42)

ch := make(chan prometheus.Metric)

go func() {
helper := &SimpleMetricsHelper{
Channel: ch,
- Labels: []string{},
+ Labels: []string{},
}
helper.NewTimestampMetric(metricDesc, prometheus.CounterValue, metricValue)
}()

- metric := <- ch
+ metric := <-ch

fqName, err := ExtractFqName(metric.Desc().String())
if err != nil {
@@ -249,10 +249,10 @@

func TestExtractTimestampMsFromMetric(t *testing.T) {
t.Run("should extract timestamp from a metric", func(t *testing.T) {
- metricDesc := prometheus.NewDesc("test_metric", "test metric help", nil, nil)
- metricType := prometheus.GaugeValue
+ metricDesc := prometheus.NewDesc("test_metric", "test metric help", nil, nil)
+ metricType := prometheus.GaugeValue
metricValue := time.UnixMilli(42)
- metric := prometheus.NewMetricWithTimestamp(metricValue, prometheus.MustNewConstMetric(metricDesc, metricType, 1))
+ metric := prometheus.NewMetricWithTimestamp(metricValue, prometheus.MustNewConstMetric(metricDesc, metricType, 1))

extractedValue, err := extractTimestampMsFromMetric(metric)
if err != nil {
