diff --git a/.chloggen/sqlserver_pc_metrics.yaml b/.chloggen/sqlserver_pc_metrics.yaml
new file mode 100644
index 000000000000..9eb7dd5b154a
--- /dev/null
+++ b/.chloggen/sqlserver_pc_metrics.yaml
@@ -0,0 +1,33 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: sqlserverreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Enable more perf counter metrics when directly connecting to SQL Server
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [33420]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: |
+  This enables the following metrics by default on non Windows-based systems:
+  `sqlserver.batch.request.rate`
+  `sqlserver.batch.sql_compilation.rate`
+  `sqlserver.batch.sql_recompilation.rate`
+  `sqlserver.page.buffer_cache.hit_ratio`
+  `sqlserver.user.connection.count`
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/receiver/sqlserverreceiver/documentation.md b/receiver/sqlserverreceiver/documentation.md
index e1e2f816880d..ef2412548b77 100644
--- a/receiver/sqlserverreceiver/documentation.md
+++ b/receiver/sqlserverreceiver/documentation.md
@@ -16,8 +16,6 @@ metrics:
 
 Number of batch requests received by SQL Server.
 
-This metric is only available when running on Windows.
-
 | Unit | Metric Type | Value Type |
 | ---- | ----------- | ---------- |
 | {requests}/s | Gauge | Double |
@@ -26,8 +24,6 @@ This metric is only available when running on Windows.
 
 Number of SQL compilations needed.
 
-This metric is only available when running on Windows.
-
 | Unit | Metric Type | Value Type |
 | ---- | ----------- | ---------- |
 | {compilations}/s | Gauge | Double |
@@ -36,8 +32,6 @@ This metric is only available when running on Windows.
 
 Number of SQL recompilations needed.
 
-This metric is only available when running on Windows.
-
 | Unit | Metric Type | Value Type |
 | ---- | ----------- | ---------- |
 | {compilations}/s | Gauge | Double |
@@ -64,8 +58,6 @@ This metric is only available when running on Windows.
 
 Pages found in the buffer pool without having to read from disk.
 
-This metric is only available when running on Windows.
-
 | Unit | Metric Type | Value Type |
 | ---- | ----------- | ---------- |
 | % | Gauge | Double |
@@ -210,8 +202,6 @@ This metric is only available when running on Windows.
 
 Number of users connected to the SQL Server.
 
-This metric is only available when running on Windows.
-
 | Unit | Metric Type | Value Type |
 | ---- | ----------- | ---------- |
 | {connections} | Gauge | Int |
diff --git a/receiver/sqlserverreceiver/metadata.yaml b/receiver/sqlserverreceiver/metadata.yaml
index a71a8b5f3e65..b78401de3722 100644
--- a/receiver/sqlserverreceiver/metadata.yaml
+++ b/receiver/sqlserverreceiver/metadata.yaml
@@ -55,7 +55,6 @@ metrics:
     unit: "{connections}"
     gauge:
       value_type: int
-    extended_documentation: This metric is only available when running on Windows.
   sqlserver.lock.wait_time.avg:
     enabled: true
     description: Average wait time for all lock requests that had to wait.
@@ -75,28 +74,24 @@ metrics:
     unit: "{requests}/s"
     gauge:
       value_type: double
-    extended_documentation: This metric is only available when running on Windows.
   sqlserver.batch.sql_compilation.rate:
     enabled: true
     description: Number of SQL compilations needed.
     unit: "{compilations}/s"
     gauge:
       value_type: double
-    extended_documentation: This metric is only available when running on Windows.
   sqlserver.batch.sql_recompilation.rate:
     enabled: true
     description: Number of SQL recompilations needed.
     unit: "{compilations}/s"
     gauge:
       value_type: double
-    extended_documentation: This metric is only available when running on Windows.
   sqlserver.page.buffer_cache.hit_ratio:
     enabled: true
     description: Pages found in the buffer pool without having to read from disk.
     unit: "%"
     gauge:
       value_type: double
-    extended_documentation: This metric is only available when running on Windows.
   sqlserver.page.life_expectancy:
     enabled: true
     description: Time a page will stay in the buffer pool.
diff --git a/receiver/sqlserverreceiver/scraper.go b/receiver/sqlserverreceiver/scraper.go
index c0a0aea32df5..1fe69310d6dc 100644
--- a/receiver/sqlserverreceiver/scraper.go
+++ b/receiver/sqlserverreceiver/scraper.go
@@ -172,10 +172,15 @@ func (s *sqlServerScraperHelper) recordDatabasePerfCounterMetrics(ctx context.Co
     const counterKey = "counter"
     const valueKey = "value"
 
     // Constants are the columns for metrics from query
+    const batchRequestRate = "Batch Requests/sec"
+    const bufferCacheHitRatio = "Buffer cache hit ratio"
     const diskReadIOThrottled = "Disk Read IO Throttled/sec"
     const diskWriteIOThrottled = "Disk Write IO Throttled/sec"
     const lockWaits = "Lock Waits/sec"
     const processesBlocked = "Processes blocked"
+    const sqlCompilationRate = "SQL Compilations/sec"
+    const sqlReCompilationsRate = "SQL Re-Compilations/sec"
+    const userConnCount = "User Connections"
 
     rows, err := s.client.QueryRows(ctx)
@@ -195,6 +200,22 @@ func (s *sqlServerScraperHelper) recordDatabasePerfCounterMetrics(ctx context.Co
         }
 
         switch row[counterKey] {
+        case batchRequestRate:
+            val, err := strconv.ParseFloat(row[valueKey], 64)
+            if err != nil {
+                err = fmt.Errorf("row %d: %w", i, err)
+                errs = append(errs, err)
+            } else {
+                s.mb.RecordSqlserverBatchRequestRateDataPoint(now, val)
+            }
+        case bufferCacheHitRatio:
+            val, err := strconv.ParseFloat(row[valueKey], 64)
+            if err != nil {
+                err = fmt.Errorf("row %d: %w", i, err)
+                errs = append(errs, err)
+            } else {
+                s.mb.RecordSqlserverPageBufferCacheHitRatioDataPoint(now, val)
+            }
         case diskReadIOThrottled:
             errs = append(errs, s.mb.RecordSqlserverResourcePoolDiskThrottledReadRateDataPoint(now, row[valueKey]))
         case diskWriteIOThrottled:
@@ -209,6 +230,30 @@ func (s *sqlServerScraperHelper) recordDatabasePerfCounterMetrics(ctx context.Co
             }
         case processesBlocked:
             errs = append(errs, s.mb.RecordSqlserverProcessesBlockedDataPoint(now, row[valueKey]))
+        case sqlCompilationRate:
+            val, err := strconv.ParseFloat(row[valueKey], 64)
+            if err != nil {
+                err = fmt.Errorf("row %d: %w", i, err)
+                errs = append(errs, err)
+            } else {
+                s.mb.RecordSqlserverBatchSQLCompilationRateDataPoint(now, val)
+            }
+        case sqlReCompilationsRate:
+            val, err := strconv.ParseFloat(row[valueKey], 64)
+            if err != nil {
+                err = fmt.Errorf("row %d: %w", i, err)
+                errs = append(errs, err)
+            } else {
+                s.mb.RecordSqlserverBatchSQLRecompilationRateDataPoint(now, val)
+            }
+        case userConnCount:
+            val, err := strconv.ParseInt(row[valueKey], 10, 64)
+            if err != nil {
+                err = fmt.Errorf("row %d: %w", i, err)
+                errs = append(errs, err)
+            } else {
+                s.mb.RecordSqlserverUserConnectionCountDataPoint(now, val)
+            }
         }
     }
 
diff --git a/receiver/sqlserverreceiver/testdata/expectedPerfCounters.yaml b/receiver/sqlserverreceiver/testdata/expectedPerfCounters.yaml
index edb17de9fcfb..9cc941eaecf6 100644
--- a/receiver/sqlserverreceiver/testdata/expectedPerfCounters.yaml
+++ b/receiver/sqlserverreceiver/testdata/expectedPerfCounters.yaml
@@ -6,6 +6,30 @@ resourceMetrics:
             stringValue: 8cac97ac9b8f
     scopeMetrics:
       - metrics:
+          - description: Number of batch requests received by SQL Server.
+            gauge:
+              dataPoints:
+                - asDouble: 3375
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: sqlserver.batch.request.rate
+            unit: '{requests}/s'
+          - description: Number of SQL compilations needed.
+            gauge:
+              dataPoints:
+                - asDouble: 413
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: sqlserver.batch.sql_compilation.rate
+            unit: '{compilations}/s'
+          - description: Number of SQL recompilations needed.
+            gauge:
+              dataPoints:
+                - asDouble: 63
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: sqlserver.batch.sql_recompilation.rate
+            unit: '{compilations}/s'
           - description: Number of lock requests resulting in a wait.
             gauge:
               dataPoints:
@@ -14,6 +38,14 @@
                   timeUnixNano: "2000000"
             name: sqlserver.lock.wait.rate
             unit: '{requests}/s'
+          - description: Pages found in the buffer pool without having to read from disk.
+            gauge:
+              dataPoints:
+                - asDouble: 100
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: sqlserver.page.buffer_cache.hit_ratio
+            unit: '%'
           - description: The number of processes that are currently blocked
             gauge:
               dataPoints:
@@ -44,6 +76,14 @@
                   timeUnixNano: "2000000"
             name: sqlserver.resource_pool.disk.throttled.write.rate
             unit: '{writes}/s'
+          - description: Number of users connected to the SQL Server.
+            gauge:
+              dataPoints:
+                - asInt: "3"
+                  startTimeUnixNano: "1000000"
+                  timeUnixNano: "2000000"
+            name: sqlserver.user.connection.count
+            unit: '{connections}'
         scope:
           name: otelcol/sqlserverreceiver
           version: latest
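
Reviewer note: the new perf counter metrics come from the direct-connection scraping path, so they are only emitted when the receiver is configured with connection details instead of relying on Windows performance counters. Below is a minimal configuration sketch for exercising that path; the endpoint, credentials, and the `debug` exporter are placeholders, and the option names (`server`, `port`, `username`, `password`, `collection_interval`) are taken from the receiver README, so verify them against the current config struct before copying.

```yaml
receivers:
  sqlserver:
    collection_interval: 10s
    # Direct-connection settings: with these set, the receiver queries the
    # SQL Server engine itself for performance counters, which is what makes
    # the metrics listed in the changelog available off Windows.
    server: 127.0.0.1        # placeholder host
    port: 1433
    username: otel_reader    # placeholder login
    password: ${env:SQLSERVER_PASSWORD}

exporters:
  debug: {}

service:
  pipelines:
    metrics:
      receivers: [sqlserver]
      exporters: [debug]
```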