From a395c5dbd054e136cc0656dc8b908c1e1db362dd Mon Sep 17 00:00:00 2001 From: Alan Protasio Date: Wed, 12 Jul 2023 08:47:08 -0700 Subject: [PATCH] Standardize index cache metrics (#6523) --- cmd/thanos/store.go | 2 +- pkg/store/bucket_e2e_test.go | 6 ++--- pkg/store/bucket_test.go | 12 +++++----- pkg/store/cache/cache.go | 21 +++++++++++++++++ pkg/store/cache/factory.go | 7 +++--- pkg/store/cache/factory_test.go | 29 +++++++++++++++++++++++ pkg/store/cache/inmemory.go | 39 ++++++++++++++----------------- pkg/store/cache/inmemory_test.go | 32 ++++++++++++------------- pkg/store/cache/memcached.go | 31 +++++++++++------------- pkg/store/cache/memcached_test.go | 6 ++--- 10 files changed, 114 insertions(+), 71 deletions(-) create mode 100644 pkg/store/cache/factory_test.go diff --git a/cmd/thanos/store.go b/cmd/thanos/store.go index d189586ac5..bded3b1127 100644 --- a/cmd/thanos/store.go +++ b/cmd/thanos/store.go @@ -323,7 +323,7 @@ func runStore( if len(indexCacheContentYaml) > 0 { indexCache, err = storecache.NewIndexCache(logger, indexCacheContentYaml, reg) } else { - indexCache, err = storecache.NewInMemoryIndexCacheWithConfig(logger, reg, storecache.InMemoryIndexCacheConfig{ + indexCache, err = storecache.NewInMemoryIndexCacheWithConfig(logger, nil, reg, storecache.InMemoryIndexCacheConfig{ MaxSize: model.Bytes(conf.indexCacheSizeBytes), MaxItemSize: storecache.DefaultInMemoryIndexCacheConfig.MaxItemSize, }) diff --git a/pkg/store/bucket_e2e_test.go b/pkg/store/bucket_e2e_test.go index 886536fae9..57049f6fc4 100644 --- a/pkg/store/bucket_e2e_test.go +++ b/pkg/store/bucket_e2e_test.go @@ -495,7 +495,7 @@ func TestBucketStore_e2e(t *testing.T) { } if ok := t.Run("with large, sufficient index cache", func(t *testing.T) { - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(s.logger, nil, storecache.InMemoryIndexCacheConfig{ + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(s.logger, nil, nil, storecache.InMemoryIndexCacheConfig{ MaxItemSize: 1e5, MaxSize: 2e5, }) @@ -507,7 +507,7 @@ func TestBucketStore_e2e(t *testing.T) { } t.Run("with small index cache", func(t *testing.T) { - indexCache2, err := storecache.NewInMemoryIndexCacheWithConfig(s.logger, nil, storecache.InMemoryIndexCacheConfig{ + indexCache2, err := storecache.NewInMemoryIndexCacheWithConfig(s.logger, nil, nil, storecache.InMemoryIndexCacheConfig{ MaxItemSize: 50, MaxSize: 100, }) @@ -540,7 +540,7 @@ func TestBucketStore_ManyParts_e2e(t *testing.T) { s := prepareStoreWithTestBlocks(t, dir, bkt, true, NewChunksLimiterFactory(0), NewSeriesLimiterFactory(0), NewBytesLimiterFactory(0), emptyRelabelConfig, allowAllFilterConf) - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(s.logger, nil, storecache.InMemoryIndexCacheConfig{ + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(s.logger, nil, nil, storecache.InMemoryIndexCacheConfig{ MaxItemSize: 1e5, MaxSize: 2e5, }) diff --git a/pkg/store/bucket_test.go b/pkg/store/bucket_test.go index e25a121f56..716430d895 100644 --- a/pkg/store/bucket_test.go +++ b/pkg/store/bucket_test.go @@ -1517,7 +1517,7 @@ func TestBucketSeries_OneBlock_InMemIndexCacheSegfault(t *testing.T) { chunkPool, err := pool.NewBucketedBytes(chunkBytesPoolMinSize, chunkBytesPoolMaxSize, 2, 100e7) testutil.Ok(t, err) - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, storecache.InMemoryIndexCacheConfig{ + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{ MaxItemSize: 3000, // 
This is the exact size of cache needed for our *single request*. // This is limited in order to make sure we test evictions. @@ -1820,7 +1820,7 @@ func TestSeries_ErrorUnmarshallingRequestHints(t *testing.T) { fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, tmpDir, nil, nil) testutil.Ok(tb, err) - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, storecache.InMemoryIndexCacheConfig{}) + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{}) testutil.Ok(tb, err) store, err := NewBucketStore( @@ -1911,7 +1911,7 @@ func TestSeries_BlockWithMultipleChunks(t *testing.T) { fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, tmpDir, nil, nil) testutil.Ok(tb, err) - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, storecache.InMemoryIndexCacheConfig{}) + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{}) testutil.Ok(tb, err) store, err := NewBucketStore( @@ -2093,7 +2093,7 @@ func setupStoreForHintsTest(t *testing.T) (testutil.TB, *BucketStore, []*storepb fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, tmpDir, nil, nil) testutil.Ok(tb, err) - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, storecache.InMemoryIndexCacheConfig{}) + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{}) testutil.Ok(tb, err) store, err := NewBucketStore( @@ -2309,7 +2309,7 @@ func TestSeries_ChunksHaveHashRepresentation(t *testing.T) { fetcher, err := block.NewMetaFetcher(logger, 10, instrBkt, tmpDir, nil, nil) testutil.Ok(tb, err) - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, storecache.InMemoryIndexCacheConfig{}) + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.InMemoryIndexCacheConfig{}) testutil.Ok(tb, err) store, err := NewBucketStore( @@ -2516,7 +2516,7 @@ func prepareBucket(b *testing.B, resolutionLevel compact.ResolutionLevel) (*buck // Create an index header reader. indexHeaderReader, err := indexheader.NewBinaryReader(ctx, logger, bkt, tmpDir, blockMeta.ULID, DefaultPostingOffsetInMemorySampling) testutil.Ok(b, err) - indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, storecache.DefaultInMemoryIndexCacheConfig) + indexCache, err := storecache.NewInMemoryIndexCacheWithConfig(logger, nil, nil, storecache.DefaultInMemoryIndexCacheConfig) testutil.Ok(b, err) // Create a bucket block with only the dependencies we need for the benchmark. diff --git a/pkg/store/cache/cache.go b/pkg/store/cache/cache.go index 82bf30625e..bcbb727b30 100644 --- a/pkg/store/cache/cache.go +++ b/pkg/store/cache/cache.go @@ -10,6 +10,8 @@ import ( "strings" "github.com/oklog/ulid" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "golang.org/x/crypto/blake2b" @@ -54,6 +56,25 @@ type IndexCache interface { FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) } +// Common metrics that should be used by all cache implementations. 
+type commonMetrics struct { + requestTotal *prometheus.CounterVec + hitsTotal *prometheus.CounterVec +} + +func newCommonMetrics(reg prometheus.Registerer) *commonMetrics { + return &commonMetrics{ + requestTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_store_index_cache_requests_total", + Help: "Total number of items requests to the cache.", + }, []string{"item_type"}), + hitsTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_store_index_cache_hits_total", + Help: "Total number of items requests to the cache that were a hit.", + }, []string{"item_type"}), + } +} + type cacheKey struct { block string key interface{} diff --git a/pkg/store/cache/factory.go b/pkg/store/cache/factory.go index 9b4103a26d..d4e9f0c5cd 100644 --- a/pkg/store/cache/factory.go +++ b/pkg/store/cache/factory.go @@ -34,6 +34,7 @@ type IndexCacheConfig struct { func NewIndexCache(logger log.Logger, confContentYaml []byte, reg prometheus.Registerer) (IndexCache, error) { level.Info(logger).Log("msg", "loading index cache configuration") cacheConfig := &IndexCacheConfig{} + cacheMetrics := newCommonMetrics(reg) if err := yaml.UnmarshalStrict(confContentYaml, cacheConfig); err != nil { return nil, errors.Wrap(err, "parsing config YAML file") } @@ -46,18 +47,18 @@ func NewIndexCache(logger log.Logger, confContentYaml []byte, reg prometheus.Reg var cache IndexCache switch strings.ToUpper(string(cacheConfig.Type)) { case string(INMEMORY): - cache, err = NewInMemoryIndexCache(logger, reg, backendConfig) + cache, err = NewInMemoryIndexCache(logger, cacheMetrics, reg, backendConfig) case string(MEMCACHED): var memcached cacheutil.RemoteCacheClient memcached, err = cacheutil.NewMemcachedClient(logger, "index-cache", backendConfig, reg) if err == nil { - cache, err = NewRemoteIndexCache(logger, memcached, reg) + cache, err = NewRemoteIndexCache(logger, memcached, cacheMetrics, reg) } case string(REDIS): var redisCache cacheutil.RemoteCacheClient redisCache, err = cacheutil.NewRedisClient(logger, "index-cache", backendConfig, reg) if err == nil { - cache, err = NewRemoteIndexCache(logger, redisCache, reg) + cache, err = NewRemoteIndexCache(logger, redisCache, cacheMetrics, reg) } default: return nil, errors.Errorf("index cache with type %s is not supported", cacheConfig.Type) diff --git a/pkg/store/cache/factory_test.go b/pkg/store/cache/factory_test.go new file mode 100644 index 0000000000..e7fb152707 --- /dev/null +++ b/pkg/store/cache/factory_test.go @@ -0,0 +1,29 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package storecache + +import ( + "testing" + + "github.com/efficientgo/core/testutil" + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" +) + +func TestIndexCacheMetrics(t *testing.T) { + reg := prometheus.NewRegistry() + commonMetrics := newCommonMetrics(reg) + + memcached := newMockedMemcachedClient(nil) + _, err := NewRemoteIndexCache(log.NewNopLogger(), memcached, commonMetrics, reg) + testutil.Ok(t, err) + conf := []byte(` +max_size: 10MB +max_item_size: 1MB +`) + // Make sure that the in memory cache does not register the same metrics of the remote index cache. 
+ // If so, we should move those metrics to the `commonMetrics` + _, err = NewInMemoryIndexCache(log.NewNopLogger(), commonMetrics, reg, conf) + testutil.Ok(t, err) +} diff --git a/pkg/store/cache/inmemory.go b/pkg/store/cache/inmemory.go index 8e35f4dca3..d5227285a2 100644 --- a/pkg/store/cache/inmemory.go +++ b/pkg/store/cache/inmemory.go @@ -43,13 +43,13 @@ type InMemoryIndexCache struct { curSize uint64 evicted *prometheus.CounterVec - requests *prometheus.CounterVec - hits *prometheus.CounterVec added *prometheus.CounterVec current *prometheus.GaugeVec currentSize *prometheus.GaugeVec totalCurrentSize *prometheus.GaugeVec overflow *prometheus.CounterVec + + commonMetrics *commonMetrics } // InMemoryIndexCacheConfig holds the in-memory index cache config. @@ -72,26 +72,31 @@ func parseInMemoryIndexCacheConfig(conf []byte) (InMemoryIndexCacheConfig, error // NewInMemoryIndexCache creates a new thread-safe LRU cache for index entries and ensures the total cache // size approximately does not exceed maxBytes. -func NewInMemoryIndexCache(logger log.Logger, reg prometheus.Registerer, conf []byte) (*InMemoryIndexCache, error) { +func NewInMemoryIndexCache(logger log.Logger, commonMetrics *commonMetrics, reg prometheus.Registerer, conf []byte) (*InMemoryIndexCache, error) { config, err := parseInMemoryIndexCacheConfig(conf) if err != nil { return nil, err } - return NewInMemoryIndexCacheWithConfig(logger, reg, config) + return NewInMemoryIndexCacheWithConfig(logger, commonMetrics, reg, config) } // NewInMemoryIndexCacheWithConfig creates a new thread-safe LRU cache for index entries and ensures the total cache // size approximately does not exceed maxBytes. -func NewInMemoryIndexCacheWithConfig(logger log.Logger, reg prometheus.Registerer, config InMemoryIndexCacheConfig) (*InMemoryIndexCache, error) { +func NewInMemoryIndexCacheWithConfig(logger log.Logger, commonMetrics *commonMetrics, reg prometheus.Registerer, config InMemoryIndexCacheConfig) (*InMemoryIndexCache, error) { if config.MaxItemSize > config.MaxSize { return nil, errors.Errorf("max item size (%v) cannot be bigger than overall cache size (%v)", config.MaxItemSize, config.MaxSize) } + if commonMetrics == nil { + commonMetrics = newCommonMetrics(reg) + } + c := &InMemoryIndexCache{ logger: logger, maxSizeBytes: uint64(config.MaxSize), maxItemSizeBytes: uint64(config.MaxItemSize), + commonMetrics: commonMetrics, } c.evicted = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ @@ -110,13 +115,9 @@ func NewInMemoryIndexCacheWithConfig(logger log.Logger, reg prometheus.Registere c.added.WithLabelValues(cacheTypeSeries) c.added.WithLabelValues(cacheTypeExpandedPostings) - c.requests = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_index_cache_requests_total", - Help: "Total number of requests to the cache.", - }, []string{"item_type"}) - c.requests.WithLabelValues(cacheTypePostings) - c.requests.WithLabelValues(cacheTypeSeries) - c.requests.WithLabelValues(cacheTypeExpandedPostings) + c.commonMetrics.requestTotal.WithLabelValues(cacheTypePostings) + c.commonMetrics.requestTotal.WithLabelValues(cacheTypeSeries) + c.commonMetrics.requestTotal.WithLabelValues(cacheTypeExpandedPostings) c.overflow = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_index_cache_items_overflowed_total", @@ -126,13 +127,9 @@ func NewInMemoryIndexCacheWithConfig(logger log.Logger, reg prometheus.Registere c.overflow.WithLabelValues(cacheTypeSeries) 
c.overflow.WithLabelValues(cacheTypeExpandedPostings) - c.hits = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_index_cache_hits_total", - Help: "Total number of requests to the cache that were a hit.", - }, []string{"item_type"}) - c.hits.WithLabelValues(cacheTypePostings) - c.hits.WithLabelValues(cacheTypeSeries) - c.hits.WithLabelValues(cacheTypeExpandedPostings) + c.commonMetrics.hitsTotal.WithLabelValues(cacheTypePostings) + c.commonMetrics.hitsTotal.WithLabelValues(cacheTypeSeries) + c.commonMetrics.hitsTotal.WithLabelValues(cacheTypeExpandedPostings) c.current = promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ Name: "thanos_store_index_cache_items", @@ -201,7 +198,7 @@ func (c *InMemoryIndexCache) onEvict(key, val interface{}) { } func (c *InMemoryIndexCache) get(typ string, key cacheKey) ([]byte, bool) { - c.requests.WithLabelValues(typ).Inc() + c.commonMetrics.requestTotal.WithLabelValues(typ).Inc() c.mtx.Lock() defer c.mtx.Unlock() @@ -210,7 +207,7 @@ func (c *InMemoryIndexCache) get(typ string, key cacheKey) ([]byte, bool) { if !ok { return nil, false } - c.hits.WithLabelValues(typ).Inc() + c.commonMetrics.hitsTotal.WithLabelValues(typ).Inc() return v.([]byte), true } diff --git a/pkg/store/cache/inmemory_test.go b/pkg/store/cache/inmemory_test.go index 205a639be1..16b76a20ea 100644 --- a/pkg/store/cache/inmemory_test.go +++ b/pkg/store/cache/inmemory_test.go @@ -25,14 +25,14 @@ import ( func TestNewInMemoryIndexCache(t *testing.T) { // Should return error on invalid YAML config. conf := []byte("invalid") - cache, err := NewInMemoryIndexCache(log.NewNopLogger(), nil, conf) + cache, err := NewInMemoryIndexCache(log.NewNopLogger(), nil, nil, conf) testutil.NotOk(t, err) testutil.Equals(t, (*InMemoryIndexCache)(nil), cache) // Should instance an in-memory index cache with default config // on empty YAML config. 
conf = []byte{} - cache, err = NewInMemoryIndexCache(log.NewNopLogger(), nil, conf) + cache, err = NewInMemoryIndexCache(log.NewNopLogger(), nil, nil, conf) testutil.Ok(t, err) testutil.Equals(t, uint64(DefaultInMemoryIndexCacheConfig.MaxSize), cache.maxSizeBytes) testutil.Equals(t, uint64(DefaultInMemoryIndexCacheConfig.MaxItemSize), cache.maxItemSizeBytes) @@ -42,7 +42,7 @@ func TestNewInMemoryIndexCache(t *testing.T) { max_size: 1MB max_item_size: 2KB `) - cache, err = NewInMemoryIndexCache(log.NewNopLogger(), nil, conf) + cache, err = NewInMemoryIndexCache(log.NewNopLogger(), nil, nil, conf) testutil.Ok(t, err) testutil.Equals(t, uint64(1024*1024), cache.maxSizeBytes) testutil.Equals(t, uint64(2*1024), cache.maxItemSizeBytes) @@ -52,7 +52,7 @@ max_item_size: 2KB max_size: 2KB max_item_size: 1MB `) - cache, err = NewInMemoryIndexCache(log.NewNopLogger(), nil, conf) + cache, err = NewInMemoryIndexCache(log.NewNopLogger(), nil, nil, conf) testutil.NotOk(t, err) testutil.Equals(t, (*InMemoryIndexCache)(nil), cache) // testutil.Equals(t, uint64(1024*1024), cache.maxSizeBytes) @@ -64,7 +64,7 @@ max_item_size: 1MB func TestInMemoryIndexCache_AvoidsDeadlock(t *testing.T) { metrics := prometheus.NewRegistry() - cache, err := NewInMemoryIndexCacheWithConfig(log.NewNopLogger(), metrics, InMemoryIndexCacheConfig{ + cache, err := NewInMemoryIndexCacheWithConfig(log.NewNopLogger(), nil, metrics, InMemoryIndexCacheConfig{ MaxItemSize: sliceHeaderSize + 5, MaxSize: sliceHeaderSize + 5, }) @@ -114,7 +114,7 @@ func TestInMemoryIndexCache_UpdateItem(t *testing.T) { }) metrics := prometheus.NewRegistry() - cache, err := NewInMemoryIndexCacheWithConfig(log.NewSyncLogger(errorLogger), metrics, InMemoryIndexCacheConfig{ + cache, err := NewInMemoryIndexCacheWithConfig(log.NewSyncLogger(errorLogger), nil, metrics, InMemoryIndexCacheConfig{ MaxItemSize: maxSize, MaxSize: maxSize, }) @@ -208,7 +208,7 @@ func TestInMemoryIndexCache_UpdateItem(t *testing.T) { // This should not happen as we hardcode math.MaxInt, but we still add test to check this out. 
func TestInMemoryIndexCache_MaxNumberOfItemsHit(t *testing.T) { metrics := prometheus.NewRegistry() - cache, err := NewInMemoryIndexCacheWithConfig(log.NewNopLogger(), metrics, InMemoryIndexCacheConfig{ + cache, err := NewInMemoryIndexCacheWithConfig(log.NewNopLogger(), nil, metrics, InMemoryIndexCacheConfig{ MaxItemSize: 2*sliceHeaderSize + 10, MaxSize: 2*sliceHeaderSize + 10, }) @@ -231,15 +231,15 @@ func TestInMemoryIndexCache_MaxNumberOfItemsHit(t *testing.T) { testutil.Equals(t, float64(0), promtest.ToFloat64(cache.evicted.WithLabelValues(cacheTypeSeries))) testutil.Equals(t, float64(3), promtest.ToFloat64(cache.added.WithLabelValues(cacheTypePostings))) testutil.Equals(t, float64(0), promtest.ToFloat64(cache.added.WithLabelValues(cacheTypeSeries))) - testutil.Equals(t, float64(0), promtest.ToFloat64(cache.requests.WithLabelValues(cacheTypePostings))) - testutil.Equals(t, float64(0), promtest.ToFloat64(cache.requests.WithLabelValues(cacheTypeSeries))) - testutil.Equals(t, float64(0), promtest.ToFloat64(cache.hits.WithLabelValues(cacheTypePostings))) - testutil.Equals(t, float64(0), promtest.ToFloat64(cache.hits.WithLabelValues(cacheTypeSeries))) + testutil.Equals(t, float64(0), promtest.ToFloat64(cache.commonMetrics.requestTotal.WithLabelValues(cacheTypePostings))) + testutil.Equals(t, float64(0), promtest.ToFloat64(cache.commonMetrics.requestTotal.WithLabelValues(cacheTypeSeries))) + testutil.Equals(t, float64(0), promtest.ToFloat64(cache.commonMetrics.hitsTotal.WithLabelValues(cacheTypePostings))) + testutil.Equals(t, float64(0), promtest.ToFloat64(cache.commonMetrics.hitsTotal.WithLabelValues(cacheTypeSeries))) } func TestInMemoryIndexCache_Eviction_WithMetrics(t *testing.T) { metrics := prometheus.NewRegistry() - cache, err := NewInMemoryIndexCacheWithConfig(log.NewNopLogger(), metrics, InMemoryIndexCacheConfig{ + cache, err := NewInMemoryIndexCacheWithConfig(log.NewNopLogger(), nil, metrics, InMemoryIndexCacheConfig{ MaxItemSize: 2*sliceHeaderSize + 5, MaxSize: 2*sliceHeaderSize + 5, }) @@ -429,8 +429,8 @@ func TestInMemoryIndexCache_Eviction_WithMetrics(t *testing.T) { // Other metrics. 
testutil.Equals(t, float64(4), promtest.ToFloat64(cache.added.WithLabelValues(cacheTypePostings))) testutil.Equals(t, float64(1), promtest.ToFloat64(cache.added.WithLabelValues(cacheTypeSeries))) - testutil.Equals(t, float64(9), promtest.ToFloat64(cache.requests.WithLabelValues(cacheTypePostings))) - testutil.Equals(t, float64(2), promtest.ToFloat64(cache.requests.WithLabelValues(cacheTypeSeries))) - testutil.Equals(t, float64(5), promtest.ToFloat64(cache.hits.WithLabelValues(cacheTypePostings))) - testutil.Equals(t, float64(1), promtest.ToFloat64(cache.hits.WithLabelValues(cacheTypeSeries))) + testutil.Equals(t, float64(9), promtest.ToFloat64(cache.commonMetrics.requestTotal.WithLabelValues(cacheTypePostings))) + testutil.Equals(t, float64(2), promtest.ToFloat64(cache.commonMetrics.requestTotal.WithLabelValues(cacheTypeSeries))) + testutil.Equals(t, float64(5), promtest.ToFloat64(cache.commonMetrics.hitsTotal.WithLabelValues(cacheTypePostings))) + testutil.Equals(t, float64(1), promtest.ToFloat64(cache.commonMetrics.hitsTotal.WithLabelValues(cacheTypeSeries))) } diff --git a/pkg/store/cache/memcached.go b/pkg/store/cache/memcached.go index b80d2d9894..f5ab1c4b02 100644 --- a/pkg/store/cache/memcached.go +++ b/pkg/store/cache/memcached.go @@ -11,7 +11,6 @@ import ( "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" @@ -43,28 +42,24 @@ type RemoteIndexCache struct { } // NewRemoteIndexCache makes a new RemoteIndexCache. -func NewRemoteIndexCache(logger log.Logger, cacheClient cacheutil.RemoteCacheClient, reg prometheus.Registerer) (*RemoteIndexCache, error) { +func NewRemoteIndexCache(logger log.Logger, cacheClient cacheutil.RemoteCacheClient, commonMetrics *commonMetrics, reg prometheus.Registerer) (*RemoteIndexCache, error) { c := &RemoteIndexCache{ logger: logger, memcached: cacheClient, compressionScheme: compressionSchemeStreamedSnappy, // Hardcode it for now. Expose it once we support different types of compressions. 
} - requests := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_index_cache_requests_total", - Help: "Total number of items requests to the cache.", - }, []string{"item_type"}) - c.postingRequests = requests.WithLabelValues(cacheTypePostings) - c.seriesRequests = requests.WithLabelValues(cacheTypeSeries) - c.expandedPostingRequests = requests.WithLabelValues(cacheTypeExpandedPostings) - - hits := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_index_cache_hits_total", - Help: "Total number of items requests to the cache that were a hit.", - }, []string{"item_type"}) - c.postingHits = hits.WithLabelValues(cacheTypePostings) - c.seriesHits = hits.WithLabelValues(cacheTypeSeries) - c.expandedPostingHits = hits.WithLabelValues(cacheTypeExpandedPostings) + if commonMetrics == nil { + commonMetrics = newCommonMetrics(reg) + } + + c.postingRequests = commonMetrics.requestTotal.WithLabelValues(cacheTypePostings) + c.seriesRequests = commonMetrics.requestTotal.WithLabelValues(cacheTypeSeries) + c.expandedPostingRequests = commonMetrics.requestTotal.WithLabelValues(cacheTypeExpandedPostings) + + c.postingHits = commonMetrics.hitsTotal.WithLabelValues(cacheTypePostings) + c.seriesHits = commonMetrics.hitsTotal.WithLabelValues(cacheTypeSeries) + c.expandedPostingHits = commonMetrics.hitsTotal.WithLabelValues(cacheTypeExpandedPostings) level.Info(logger).Log("msg", "created index cache") @@ -200,5 +195,5 @@ func (c *RemoteIndexCache) FetchMultiSeries(ctx context.Context, blockID ulid.UL // NewMemcachedIndexCache is alias NewRemoteIndexCache for compatible. func NewMemcachedIndexCache(logger log.Logger, memcached cacheutil.RemoteCacheClient, reg prometheus.Registerer) (*RemoteIndexCache, error) { - return NewRemoteIndexCache(logger, memcached, reg) + return NewRemoteIndexCache(logger, memcached, nil, reg) } diff --git a/pkg/store/cache/memcached_test.go b/pkg/store/cache/memcached_test.go index 47249aa5e9..cda095a853 100644 --- a/pkg/store/cache/memcached_test.go +++ b/pkg/store/cache/memcached_test.go @@ -86,7 +86,7 @@ func TestMemcachedIndexCache_FetchMultiPostings(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { memcached := newMockedMemcachedClient(testData.mockedErr) - c, err := NewRemoteIndexCache(log.NewNopLogger(), memcached, nil) + c, err := NewRemoteIndexCache(log.NewNopLogger(), memcached, nil, nil) testutil.Ok(t, err) // Store the postings expected before running the test. @@ -167,7 +167,7 @@ func TestMemcachedIndexCache_FetchExpandedPostings(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { memcached := newMockedMemcachedClient(testData.mockedErr) - c, err := NewRemoteIndexCache(log.NewNopLogger(), memcached, nil) + c, err := NewRemoteIndexCache(log.NewNopLogger(), memcached, nil, nil) testutil.Ok(t, err) // Store the postings expected before running the test. @@ -262,7 +262,7 @@ func TestMemcachedIndexCache_FetchMultiSeries(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { memcached := newMockedMemcachedClient(testData.mockedErr) - c, err := NewRemoteIndexCache(log.NewNopLogger(), memcached, nil) + c, err := NewRemoteIndexCache(log.NewNopLogger(), memcached, nil, nil) testutil.Ok(t, err) // Store the series expected before running the test.
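
The net effect of the patch is that `thanos_store_index_cache_requests_total` and `thanos_store_index_cache_hits_total` are created once in `newCommonMetrics` and handed to both cache implementations, so the factory can wire an in-memory or a remote cache against the same Registerer without duplicate registration. The sketch below is a hypothetical in-package helper, not part of the patch (the helper name and the use of `DefaultInMemoryIndexCacheConfig` are assumptions); it only illustrates the sharing pattern that `NewIndexCache` in factory.go now uses:

package storecache

import (
	"github.com/go-kit/log"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/thanos-io/thanos/pkg/cacheutil"
)

// newCachesSharingMetrics is a hypothetical helper mirroring the factory: a single
// commonMetrics instance backs the requests/hits counters of both caches, while each
// implementation still registers its own implementation-specific metrics
// (evictions, overflow, current size, ...) against reg.
func newCachesSharingMetrics(logger log.Logger, client cacheutil.RemoteCacheClient, reg prometheus.Registerer) (IndexCache, IndexCache, error) {
	metrics := newCommonMetrics(reg)

	// In-memory cache: the shared counters come from metrics instead of being
	// re-registered, matching the new NewInMemoryIndexCacheWithConfig signature.
	inmem, err := NewInMemoryIndexCacheWithConfig(logger, metrics, reg, DefaultInMemoryIndexCacheConfig)
	if err != nil {
		return nil, nil, err
	}

	// Remote cache (memcached or Redis client): the same counters are reused and
	// item kinds are distinguished only by the "item_type" label.
	remote, err := NewRemoteIndexCache(logger, client, metrics, reg)
	if err != nil {
		return nil, nil, err
	}

	return inmem, remote, nil
}

Callers outside the package cannot construct the unexported commonMetrics; they pass nil (as cmd/thanos/store.go and the tests above do), in which case each constructor registers its own copy via newCommonMetrics, preserving the previous behaviour.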