Making changes in the test cases
Added a few more test cases to check that data being downsampled gets converted to aggregated chunks, and a test case to check that non-empty XOR chunks can be iterated through.

Signed-off-by: Kartik-Garg <[email protected]>
Kartik-Garg committed Feb 22, 2023
1 parent a159680 commit 6fc86a5
Showing 3 changed files with 67 additions and 15 deletions.
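For context on what the new test exercises: iterating a non-empty XOR chunk goes through the Prometheus `chunkenc` API. A minimal sketch, assuming a `chunkenc` version (as Thanos used at the time) where `Iterator.Next` returns a `chunkenc.ValueType`:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	// Build a small XOR chunk the same way the test does.
	raw := chunkenc.NewXORChunk()
	app, err := raw.Appender()
	if err != nil {
		panic(err)
	}
	app.Append(1587690005794, 42.5)

	// Iterate the chunk: Next reports the type of the upcoming sample,
	// or chunkenc.ValNone once the iterator is exhausted.
	it := raw.Iterator(nil)
	for it.Next() != chunkenc.ValNone {
		ts, v := it.At()
		fmt.Println(ts, v) // 1587690005794 42.5
	}
	if err := it.Err(); err != nil {
		panic(err)
	}
}
```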
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -33,6 +33,7 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re
- [#6098](https://github.com/thanos-io/thanos/pull/6098) Cache/Redis: upgrade `rueidis` to v0.0.93 to fix potential panic when the client-side caching is disabled.
- [#6103](https://github.com/thanos-io/thanos/pull/6103) Mixins(Rule): Fix query for long rule evaluations.
- [#6121](https://github.com/thanos-io/thanos/pull/6121) Receive: Deduplicate metamonitoring queries.
+ - [#6137](https://github.com/thanos-io/thanos/pull/6137) Downsample: Repair of non-empty XOR chunks during 1h downsampling.

### Changed

3 changes: 1 addition & 2 deletions pkg/compact/downsample/downsample.go
@@ -175,8 +175,7 @@ func Downsample(
for _, cn := range aggrDataChunks {
ac, ok = cn.Chunk.(*AggrChunk)
if !ok {
- level.Warn(logger).Log("Not able to convert non-empty XOR chunks into 5m downsampled Aggregated chunks")
- continue
+ return id, errors.New("Not able to convert non-empty XOR chunks to 5m downsampled aggregated chunks.")
}
}
}
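The behavior change above: a chunk that fails the `*AggrChunk` type assertion now aborts downsampling with an error instead of logging a warning and continuing. A simplified, self-contained sketch of that pattern (the `Chunk`, `AggrChunk`, and `XORChunk` types here are hypothetical stand-ins, not the real Thanos types):

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the real chunk types, for illustration only.
type Chunk interface{ Encoding() string }

type AggrChunk struct{}

func (*AggrChunk) Encoding() string { return "aggr" }

type XORChunk struct{}

func (*XORChunk) Encoding() string { return "xor" }

// checkAggrChunks mimics the changed loop: a chunk that is not an
// *AggrChunk is now a hard error instead of a logged warning.
func checkAggrChunks(chks []Chunk) error {
	for _, c := range chks {
		if _, ok := c.(*AggrChunk); !ok {
			return errors.New("not able to convert non-empty XOR chunks to 5m downsampled aggregated chunks")
		}
	}
	return nil
}

func main() {
	fmt.Println(checkAggrChunks([]Chunk{&AggrChunk{}}))              // <nil>
	fmt.Println(checkAggrChunks([]Chunk{&AggrChunk{}, &XORChunk{}})) // error
}
```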
78 changes: 65 additions & 13 deletions pkg/compact/downsample/downsample_test.go
@@ -538,26 +538,23 @@ func TestDownsampleAggrAndEmptyXORChunks(t *testing.T) {
}

func TestDownsampleAggrAndNonEmptyXORChunks(t *testing.T) {

logger := log.NewLogfmtLogger(os.Stderr)
dir := t.TempDir()

ser := &series{lset: labels.FromStrings("__name__", "a")}
aggr := map[AggrType][]sample{
- AggrCount: {{t: 1587690299999, v: 20}},
- AggrSum: {{t: 1587693590791, v: 255746}},
- AggrMin: {{t: 1587690299999, v: 461968}},
- AggrMax: {{t: 1587690299999, v: 465870}},
- AggrCounter: {{t: 1587690005791, v: 461968}},
+ AggrCount: {{t: 1587690299999, v: 20}, {t: 1587690599999, v: 20}, {t: 1587690899999, v: 20}},
+ AggrSum: {{t: 1587690299999, v: 9.276972e+06}, {t: 1587690599999, v: 9.359861e+06}, {t: 1587693590791, v: 255746}},
+ AggrMin: {{t: 1587690299999, v: 461968}, {t: 1587690599999, v: 466070}, {t: 1587690899999, v: 470131}, {t: 1587691199999, v: 474913}},
+ AggrMax: {{t: 1587690299999, v: 465870}, {t: 1587690599999, v: 469951}, {t: 1587690899999, v: 474726}},
+ AggrCounter: {{t: 1587690005791, v: 461968}, {t: 1587690299999, v: 465870}, {t: 1587690599999, v: 469951}},
}
raw := chunkenc.NewXORChunk()
app, err := raw.Appender()
testutil.Ok(t, err)
// This chunk comes in as !ok and passes through our newly created functionality.

app.Append(1587690005794, 42.5)
// app.Append(1587690005795, 42.6)
// app.Append(1587690005796, 42.7)
// app.Append(1587690005797, 42.8)
// app.Append(1587690005798, 42.9)

ser.chunks = append(ser.chunks, encodeTestAggrSeries(aggr), chunks.Meta{
MinTime: math.MaxInt64,
MaxTime: math.MinInt64,
@@ -568,12 +565,67 @@ func TestDownsampleAggrAndNonEmptyXORChunks(t *testing.T) {
mb.addSeries(ser)

fakeMeta := &metadata.Meta{}
// Resolution the block already has (5m).
fakeMeta.Thanos.Downsample.Resolution = 300_000
// Downsample to the 1h target resolution.
id, err := Downsample(logger, fakeMeta, mb, dir, 3_600_000)
- _ = id
testutil.Ok(t, err)

expected := []map[AggrType][]sample{
{
AggrCount: {{1587690005794, 20}, {1587690005794, 20}, {1587690005794, 21}},
AggrSum: {{1587690005794, 9.276972e+06}, {1587690005794, 9.359861e+06}, {1587690005794, 255788.5}},
AggrMin: {{1587690005794, 461968}, {1587690005794, 466070}, {1587690005794, 470131}, {1587690005794, 42.5}},
AggrMax: {{1587690005794, 465870}, {1587690005794, 469951}, {1587690005794, 474726}},
AggrCounter: {{1587690005791, 461968}, {1587690599999, 469951}, {1587690599999, 469951}},
},
}

_, err = metadata.ReadFromDir(filepath.Join(dir, id.String()))
testutil.Ok(t, err)

indexr, err := index.NewFileReader(filepath.Join(dir, id.String(), block.IndexFilename))
testutil.Ok(t, err)
defer func() { testutil.Ok(t, indexr.Close()) }()

chunkr, err := chunks.NewDirReader(filepath.Join(dir, id.String(), block.ChunksDirname), NewPool())
testutil.Ok(t, err)
defer func() { testutil.Ok(t, chunkr.Close()) }()

pall, err := indexr.Postings(index.AllPostingsKey())
testutil.Ok(t, err)

var series []storage.SeriesRef
for pall.Next() {
series = append(series, pall.At())
}
testutil.Ok(t, pall.Err())
testutil.Equals(t, 1, len(series))

var builder labels.ScratchBuilder
var chks []chunks.Meta
testutil.Ok(t, indexr.Series(series[0], &builder, &chks))

var got []map[AggrType][]sample
for _, c := range chks {
chk, err := chunkr.Chunk(c)
testutil.Ok(t, err)

m := map[AggrType][]sample{}
for _, at := range []AggrType{AggrCount, AggrSum, AggrMin, AggrMax, AggrCounter} {
c, err := chk.(*AggrChunk).Get(at)
if err == ErrAggrNotExist {
continue
}
testutil.Ok(t, err)

buf := m[at]
testutil.Ok(t, expandChunkIterator(c.Iterator(nil), &buf))
m[at] = buf
}
got = append(got, m)
}
testutil.Equals(t, expected, got)

}

func chunksToSeriesIteratable(t *testing.T, inRaw [][]sample, inAggr []map[AggrType][]sample) *series {
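As a sanity check on the `expected` values in the test above: the extra raw point (v=42.5) appended to the XOR chunk accounts for the changed aggregate values, i.e. one more count, 42.5 added to the sum, and a new minimum. A quick sketch of that arithmetic, using only the numbers from the test:

```go
package main

import "fmt"

func main() {
	// Aggregate values before the raw sample is merged in
	// (taken from the test's input aggr map).
	count, sum, min := 20.0, 255746.0, 474913.0
	rawV := 42.5 // the one point appended to the non-empty XOR chunk

	count++     // one more raw sample -> 21
	sum += rawV // 255746 + 42.5 -> 255788.5
	if rawV < min {
		min = rawV // 42.5 becomes the new minimum
	}
	fmt.Println(count, sum, min) // 21 255788.5 42.5
}
```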
